docker-elastic-logs
Run the latest version of the Elastic Stack with Docker and Docker Compose.
It lets you analyze any dataset using the search and aggregation capabilities of Elasticsearch and the visualization power of Kibana.
Elasticsearch & Kibana
Multi-node cluster
1) Prepare the environment
Create the following configuration files in a new, empty directory. These files are also available from the elasticsearch repository on GitHub.
.env
The .env file sets environment variables that are used when you run the docker-compose.yml configuration file. Make sure to use the ELASTIC_PASSWORD and KIBANA_PASSWORD variables to specify strong passwords for the elastic and kibana_system users. These variables are referenced by the docker-compose.yml file.
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=elastic
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=kibana
# Version of Elastic products
STACK_VERSION=8.2.0
# Set the cluster name
CLUSTER_NAME=elastdocker-cluster
# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial
# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200
# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80
# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824
# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=elastic
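To sanity-check that Compose picks up the variables from this .env file, you can render the resolved configuration before starting anything (an optional check; docker-compose config is a standard Compose command):
$ docker-compose config | grep "image:"
This should print the Elasticsearch and Kibana images with the STACK_VERSION tag substituted, e.g. docker.elastic.co/elasticsearch/elasticsearch:8.2.0.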
docker-compose.yml
This docker-compose.yml file creates a secure three-node Elasticsearch cluster with authentication and network encryption enabled, plus a Kibana instance that connects to it securely.
Exposing ports: if you do not want to expose port 9200 to external hosts, set the value of ES_PORT in the .env file to something like 127.0.0.1:9200. Elasticsearch will then only be accessible from the host machine itself.
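For example, with ES_PORT=127.0.0.1:9200 the ${ES_PORT}:9200 mapping in the es01 service below expands to a loopback-only binding, equivalent to:
ports:
  - "127.0.0.1:9200:9200"   # published only on the host loopback interface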
version: "2.2"
services:
setup:
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es02\n"\
" dns:\n"\
" - es02\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es03\n"\
" dns:\n"\
" - es03\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
depends_on:
setup:
condition: service_healthy
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata01:/usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es02,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
es02:
depends_on:
- es01
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata02:/usr/share/elasticsearch/data
environment:
- node.name=es02
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es01,es03
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es02/es02.key
- xpack.security.http.ssl.certificate=certs/es02/es02.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es02/es02.key
- xpack.security.transport.ssl.certificate=certs/es02/es02.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
es03:
depends_on:
- es02
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata03:/usr/share/elasticsearch/data
environment:
- node.name=es03
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es01,es02
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es03/es03.key
- xpack.security.http.ssl.certificate=certs/es03/es03.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es03/es03.key
- xpack.security.transport.ssl.certificate=certs/es03/es03.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
es02:
condition: service_healthy
es03:
condition: service_healthy
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
volumes:
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
volumes:
certs:
driver: local
esdata01:
driver: local
esdata02:
driver: local
esdata03:
driver: local
kibanadata:
driver: local
2) Start the cluster
Create and start the three-node Elasticsearch cluster and the Kibana instance:
$ docker-compose up -d
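The first start can take a minute or two while the setup service generates certificates and the nodes form the cluster. You can watch progress with standard Compose commands, for example:
$ docker-compose ps
$ docker-compose logs -f setup
When the setup service logs "All done!" and the es01/es02/es03 and kibana services report a healthy status, the stack is ready.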
3) Test access
Once the deployment has started, open a browser and navigate to http://localhost:5601 to access Kibana, where you can load sample data and interact with the cluster.
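You can also verify the Elasticsearch HTTPS endpoint directly. The sketch below runs curl inside the es01 container so it can use the CA certificate generated by the setup service, authenticating with the ELASTIC_PASSWORD from the .env file (elastic in this example):
$ docker-compose exec es01 curl -s --cacert config/certs/ca/ca.crt -u elastic:elastic https://localhost:9200
A successful request returns a JSON document with the cluster name and version information.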
4) Stop and remove the deployment
To stop the cluster, run docker-compose down. The data in the Docker volumes is preserved and loaded again when you restart the cluster with docker-compose up.
$ docker-compose down
To delete the network, containers, and volumes when you stop the cluster, specify the -v option:
$ docker-compose down -v
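The named volumes created by this file (certs, esdata01, esdata02, esdata03, kibanadata, each prefixed with the Compose project name) can be inspected with:
$ docker volume ls
Running docker-compose down -v deletes them, so all indexed data and the generated certificates are lost.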
Single-node cluster
.env
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=elastic
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=kibana
# Version of Elastic products
STACK_VERSION=8.2.0
# Set the node and cluster name
CLUSTER_NAME=elastdocker-cluster
NODE_NAME=elastdocker-node-0
INIT_MASTER_NODE=elastdocker-node-0
# Hostnames of master-eligible Elasticsearch instances (must match the Compose-generated host names)
DISCOVERY_SEEDS_HOSTS=elasticsearch
# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial
# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200
# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80
# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824
# Project namespace (defaults to the current folder name if not set)
COMPOSE_PROJECT_NAME=elastic
docker-compose.yml
version: "2.2"
services:
setup:
image: registry.cn-shenzhen.aliyuncs.com/aluo/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: elasticsearch\n"\
" dns:\n"\
" - elasticsearch\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: kibana\n"\
" dns:\n"\
" - kibana\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://elasticsearch:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/elasticsearch/elasticsearch.crt ]"]
interval: 1s
timeout: 5s
retries: 120
elasticsearch:
depends_on:
setup:
condition: service_healthy
image: registry.cn-shenzhen.aliyuncs.com/aluo/elasticsearch:${STACK_VERSION}
volumes:
#- ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- certs:/usr/share/elasticsearch/config/certs
- esdata:/usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=${NODE_NAME}
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=${INIT_MASTER_NODE}
- discovery.seed_hosts=${DISCOVERY_SEEDS_HOSTS}
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/elasticsearch/elasticsearch.key
- xpack.security.http.ssl.certificate=certs/elasticsearch/elasticsearch.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/elasticsearch/elasticsearch.key
- xpack.security.transport.ssl.certificate=certs/elasticsearch/elasticsearch.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
elasticsearch:
condition: service_healthy
image: registry.cn-shenzhen.aliyuncs.com/aluo/kibana:${STACK_VERSION}
volumes:
#- ./kibana.yml:/usr/share/kibana/config/kibana.yml
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://elasticsearch:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
volumes:
certs:
driver: local
esdata:
driver: local
kibanadata:
driver: local
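The single-node variant is started, verified, and torn down the same way as the multi-node cluster. A minimal usage sketch, using the service name elasticsearch and the passwords from the .env file above:
$ docker-compose up -d
$ docker-compose exec elasticsearch curl -s --cacert config/certs/ca/ca.crt -u elastic:elastic "https://localhost:9200/_cluster/health?pretty"
$ docker-compose down
Note that a single-node cluster may report yellow health once indices with replicas exist, because replica shards cannot be allocated on the same node as their primaries.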
Elasticsearch & Kibana & Filebeat
.env
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=elastic
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=kibana
# Version of Elastic products
STACK_VERSION=8.2.0
# Set the node and cluster name
CLUSTER_NAME=elastdocker-cluster
NODE_NAME=elastdocker-node-0
INIT_MASTER_NODE=elastdocker-node-0
# Hostnames of master-eligible Elasticsearch instances (must match the Compose-generated host names)
DISCOVERY_SEEDS_HOSTS=elasticsearch
# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial
# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200
# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80
# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824
# Project namespace (defaults to the current folder name if not set)
COMPOSE_PROJECT_NAME=elastic
docker-compose.yml
version: "2.2"
services:
setup:
image: registry.cn-shenzhen.aliyuncs.com/aluo/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: elasticsearch\n"\
" dns:\n"\
" - elasticsearch\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: kibana\n"\
" dns:\n"\
" - kibana\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://elasticsearch:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/elasticsearch/elasticsearch.crt ]"]
interval: 1s
timeout: 5s
retries: 120
elasticsearch:
depends_on:
setup:
condition: service_healthy
image: registry.cn-shenzhen.aliyuncs.com/aluo/elasticsearch:${STACK_VERSION}
volumes:
#- ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- certs:/usr/share/elasticsearch/config/certs
- esdata:/usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=${NODE_NAME}
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=${INIT_MASTER_NODE}
- discovery.seed_hosts=${DISCOVERY_SEEDS_HOSTS}
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/elasticsearch/elasticsearch.key
- xpack.security.http.ssl.certificate=certs/elasticsearch/elasticsearch.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/elasticsearch/elasticsearch.key
- xpack.security.transport.ssl.certificate=certs/elasticsearch/elasticsearch.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
elasticsearch:
condition: service_healthy
image: registry.cn-shenzhen.aliyuncs.com/aluo/kibana:${STACK_VERSION}
volumes:
#- ./kibana.yml:/usr/share/kibana/config/kibana.yml
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://elasticsearch:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
filebeat:
depends_on:
kibana:
condition: service_healthy
image: registry.cn-shenzhen.aliyuncs.com/aluo/filebeat:${STACK_VERSION}
restart: always
# -e flag to log to stderr and disable syslog/file output
command: -e --strict.perms=false
user: root
ports:
- 5066:5066
environment:
ELASTIC_USERNAME: elastic
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
KIBANA_HOST_PORT: http://kibana:5601
ELASTICSEARCH_HOST_PORT: https://elasticsearch:9200
volumes:
- ./filebeat.docker.logs.yml:/usr/share/filebeat/filebeat.yml:ro
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- filebeatdata:/var/lib/filebeat/data
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5066/stats | grep -q 'HTTP/1.1 200 OK'",
]
interval: 10s
timeout: 10s
retries: 120
volumes:
certs:
driver: local
esdata:
driver: local
kibanadata:
driver: local
filebeatdata:
driver: local
filebeat.docker.logs.yml
#================================== Description ========================================
# Filebeat config to ship the logs of Docker containers running on a Docker host (excluding
# the Elasticsearch/Logstash/Kibana containers themselves) to the Elasticsearch cluster.
name: filebeat-docker-logs-shipper
filebeat.config:
modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
#================================ Autodiscover =======================================
# Autodiscover all containers except those running the elasticsearch/logstash/kibana images,
# and add a separate input for each container and log type.
filebeat.autodiscover:
providers:
- type: docker
templates:
- condition:
and:
- not.contains:
docker.container.image: elasticsearch
- not.contains:
docker.container.image: logstash
- not.contains:
docker.container.image: kibana
config:
- type: container
paths:
- /var/lib/docker/containers/${data.docker.container.id}/*.log
processors:
- add_cloud_metadata: ~
- add_docker_metadata: ~
# Output to Logstash
#output.logstash:
# hosts: ["logstash:5044"]
# Output to Elasticsearch
output.elasticsearch:
hosts: '${ELASTICSEARCH_HOST_PORT}'
username: '${ELASTIC_USERNAME}'
password: '${ELASTIC_PASSWORD}'
ssl:
verification_mode: "none"
#=================================== Kibana ==========================================
# Enable setting up Kibana
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup:
kibana:
host: '${KIBANA_HOST_PORT}'
username: '${ELASTIC_USERNAME}'
password: '${ELASTIC_PASSWORD}'
#==================================== Monitoring =====================================
# Enable Monitoring Beats
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch
# Use deprecated option to avoid current UX bug in 7.3.0 where filebeat creates a
# standalone monitoring cluster in the monitoring UI.
# see: https://github.com/elastic/beats/pull/13182
xpack.monitoring:
enabled: true
elasticsearch:
hosts: '${ELASTICSEARCH_HOST_PORT}'
username: '${ELASTIC_USERNAME}'
password: '${ELASTIC_PASSWORD}'
#monitoring:
# enabled: true
# elasticsearch:
# hosts: '${ELASTICSEARCH_HOST_PORT}'
# username: '${ELASTIC_USERNAME}'
# password: '${ELASTIC_PASSWORD}'
#================================ HTTP Endpoint ======================================
# Enabled so we can monitor filebeat using filebeat exporter if needed.
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
http.enabled: true
http.host: 0.0.0.0
http.port: 5066
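After starting this stack you can confirm that Filebeat is running and shipping container logs; a couple of sanity checks, assuming the default ports and the elastic password from the .env file above:
$ curl -s "http://localhost:5066/stats?pretty" | head
$ curl -sk -u elastic:elastic "https://localhost:9200/_cat/indices?v"
The first command queries the Filebeat HTTP monitoring endpoint enabled by http.enabled above; the second lists the indices in Elasticsearch, where the Filebeat-created indices should appear once other containers have produced log lines (-k skips TLS verification for brevity, since the generated CA certificate lives inside the containers).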