Hello
I am trying to to create a swarm cluster with several aws EC2 instance. I use logstash and filebeats for parsing log files. I have next configuration of docker-compose:
logstash:
image: docker.elastic.co/logstash/logstash:$ELK_VERSION
deploy:
replicas: 1
placement:
constraints:
- 'node.role == manager'
container_name: logstash
volumes:
- ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- ./logstash/pipeline:/usr/share/logstash/pipeline
- ./load:/load
ports:
- "5044:5044"
- "9600:9600"
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
depends_on:
- elasticsearch
filebeat_master:
image: docker.elastic.co/beats/filebeat:6.7.0
command: filebeat -e -strict.perms=false
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- 'node.role == manager'
volumes:
- "./filebeat/config/filebeat_master.yml:/usr/share/filebeat/filebeat.yml:ro"
- ./load:/load
- /var/run/docker.sock:/var/run/docker.sock
restart: "no"
links:
- "logstash:logstash"
depends_on:
- logstash
- elasticsearch
- kibana
filebeat_slave:
image: docker.elastic.co/beats/filebeat:6.7.0
command: filebeat -e -strict.perms=false
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- 'node.role != manager'
volumes:
- "./filebeat/config/filebeat_slave.yml:/usr/share/filebeat/filebeat.yml:ro"
- /var/run/docker.sock:/var/run/docker.sock - ./load:/load
config of filebeat_master:
filebeat:
prospectors:
-
paths:
- /load/*.log
input_type: log
output:
logstash:
hosts: ["logstash:5044"]
config of filebeat_slave:
filebeat:
prospectors:
-
paths:
- /load/*.log
input_type: log
output:
logstash:
hosts: ["<EC2_MASTER_EXTERNAL_IP>:5044"]
The question is:
can I somehow reach logstash by servicename on swarm slave node not using EC2_MASTER_EXTERNAL_IP? (If I use it in current setup - i get errors )
Any help appreciated
thank in advance