# docker-compose.yml
version: '2'
volumes:
  mariadb:
  spark:
services:
  spark-master:
    # build: .
    image: docker.io/bitnami/spark:3-debian-10
    user: root
    volumes:
      - spark:/data
    environment:
      - SPARK_MODE=master
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    ports:
      - '8080:8080'
      - '4040:4040'
  spark-worker:
    image: docker.io/bitnami/spark:3-debian-10
    user: root
    volumes:
      - spark:/data
    environment:
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://spark-master:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
  mariadb:
    image: mariadb
    ports:
      - '3306:3306'
    volumes:
      - mariadb:/var/lib/mysql
      # Seed the database from this dump on first start
      # (a PySpark read against this database is sketched after this file)
      - ./anon_sync_20210310.sql:/docker-entrypoint-initdb.d/01-anon_sync_20210310.sql
    environment:
      MYSQL_ROOT_PASSWORD: maria
      MYSQL_DATABASE: test
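
To sanity-check the stack, a job along the lines of the sketch below can be run from inside the spark-master container (e.g. via docker-compose exec), where the compose service names spark-master and mariadb resolve. It is a sketch only: it assumes pyspark is available in the image and that the MariaDB Connector/J jar can be fetched via spark.jars.packages, and the table name some_table is a placeholder for whatever the SQL dump actually creates.

# mariadb_read_sketch.py
from pyspark.sql import SparkSession

# Build a session against the standalone master; the hostnames below only
# resolve on the compose network, so run this inside one of the containers.
spark = (
    SparkSession.builder
    .appName("mariadb-read-sketch")
    .master("spark://spark-master:7077")
    # Assumption: the JDBC driver is fetched at startup via Maven coordinates.
    .config("spark.jars.packages", "org.mariadb.jdbc:mariadb-java-client:2.7.4")
    .getOrCreate()
)

df = (
    spark.read.format("jdbc")
    .option("url", "jdbc:mariadb://mariadb:3306/test")
    .option("driver", "org.mariadb.jdbc.Driver")
    .option("user", "root")
    .option("password", "maria")  # MYSQL_ROOT_PASSWORD from the compose file
    .option("dbtable", "some_table")  # placeholder: a table created by the dump
    .load()
)
df.show()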
# docker-compose.override.yml
version: "2"
networks:
  # This special network is configured so that the local metadata
  # service can bind to the specific IP address that ECS uses
  # in production
  credentials_network:
    driver: bridge
    ipam:
      config:
        - subnet: "169.254.170.0/24"
          gateway: 169.254.170.1
services:
  # This container vends AWS credentials to the application containers
  ecs-local-endpoints:
    # The Amazon ECS Local Container Endpoints Docker image
    image: amazon/amazon-ecs-local-container-endpoints
    volumes:
      # Mount the Docker socket so this container can talk to the Docker daemon
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      # Pass your local AWS credentials through to the endpoints container
      AWS_ACCESS_KEY_ID: "${AWS_ACCESS_KEY_ID}"
      AWS_SECRET_ACCESS_KEY: "${AWS_SECRET_ACCESS_KEY}"
      AWS_SESSION_TOKEN: "${AWS_SESSION_TOKEN}"
      AWS_DEFAULT_REGION: "eu-west-1"
    networks:
      credentials_network:
        # This special IP address is recognized by the AWS SDKs and the AWS CLI
        ipv4_address: "169.254.170.2"
  # Here we reference the application container(s) that we are testing.
  # You can test multiple containers at a time: duplicate this section,
  # customize it for each container, and, if you assign static addresses,
  # give each one a unique IP in 'credentials_network'.
  spark-master:
    depends_on:
      - ecs-local-endpoints
    networks:
      - credentials_network
      - default
    environment:
      AWS_DEFAULT_REGION: "eu-west-1"
      # The SDKs resolve this path against 169.254.170.2 (see the sketch after this file)
      AWS_CONTAINER_CREDENTIALS_RELATIVE_URI: "/creds"
      # ECS_CONTAINER_METADATA_URI: "http://ecs-local-endpoints/v3"
  spark-worker:
    depends_on:
      - ecs-local-endpoints
    networks:
      - credentials_network
      - default
    environment:
      AWS_DEFAULT_REGION: "eu-west-1"
      AWS_CONTAINER_CREDENTIALS_RELATIVE_URI: "/creds"
      # ECS_CONTAINER_METADATA_URI: "http://ecs-local-endpoints/v3"
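
With both files in place, a plain docker-compose up merges docker-compose.yml and docker-compose.override.yml automatically. To confirm that the Spark containers actually receive credentials from ecs-local-endpoints, a quick check like the sketch below can be run inside spark-master or spark-worker; it assumes boto3 is installed in the container (the bitnami image does not ship it, so pip install boto3 first).

# credentials_check_sketch.py
import boto3

# The SDK sees AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=/creds and fetches
# credentials from http://169.254.170.2/creds, i.e. from the
# ecs-local-endpoints container on credentials_network.
session = boto3.Session()
creds = session.get_credentials()
print("access key in use:", creds.access_key)

# Round-trip through STS to prove the vended credentials are valid;
# the region comes from AWS_DEFAULT_REGION set in the override file.
print("caller identity:", boto3.client("sts").get_caller_identity()["Arn"])

The same credentials flow should also be usable from Spark itself, e.g. by pointing the S3A connector at the container credentials chain (setting spark.hadoop.fs.s3a.aws.credentials.provider to com.amazonaws.auth.ContainerCredentialsProvider), provided the matching hadoop-aws and AWS SDK jars are on the classpath.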