Starting EFK with docker-compose
Use docker-compose to spin up a single-node EFK stack with one command, for quick testing.
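The compose file below mounts a number of relative host paths. A directory layout consistent with those volume definitions (reconstructed from the mounts; the original article only lists the files themselves) looks roughly like this:

/opt/efk_qudat
├── docker-compose.yml
├── .env                          # provides KAFKA_EXTERNAL_HOST, see below
├── app
│   ├── elasticsearch
│   │   ├── config/elasticsearch.yml
│   │   ├── data
│   │   ├── logs
│   │   └── plugins/ik
│   ├── fluentd
│   │   ├── conf/fluent.conf
│   │   └── log
│   └── kibana
│       └── config/kibana.yml
└── kafka
    └── kafka-map/data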
EFK configuration
#[Tuesday 01/06/26 15:19:41]{/opt/efk_qudat}
#{root@node5}# cat docker-compose.yml
version: '3'
networks:
  efkk:
    ipam:
      driver: default
      config:
        - subnet: "172.22.6.0/24"
services:
  zookeeper:
    image: registry.cn-hangzhou.aliyuncs.com/zhengqing/zookeeper:latest
    container_name: zookeeper-server
    restart: unless-stopped
    volumes:
      - "/etc/localtime:/etc/localtime"
    environment:
      ALLOW_ANONYMOUS_LOGIN: "yes"
    expose:
      - "2181"
    networks:
      efkk:
        ipv4_address: 172.22.6.11
  kafka:
    image: registry.cn-hangzhou.aliyuncs.com/zhengqing/kafka:latest
    #image: register.hrttest.cn/tools/apache/kafka:4.0.0
    container_name: kafka
    restart: unless-stopped
    env_file:
      - ./.env
    volumes:
      - "/etc/localtime:/etc/localtime"
      #- "./app/kafka:/bitnami/kafka:rw"
    environment:
      KAFKA_HEAP_OPTS: "-Xmx1024m -Xms1024m"
      ALLOW_PLAINTEXT_LISTENER: "yes"
      #KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      #KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://${KAFKA_EXTERNAL_HOST}:9091
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${KAFKA_EXTERNAL_HOST}:9092
      # Log cleanup policy (do not retain logs)
      KAFKA_CFG_LOG_CLEANUP_POLICY: "delete"
      KAFKA_CFG_LOG_RETENTION_HOURS: "0" # disable time-based retention
      KAFKA_CFG_LOG_RETENTION_BYTES: "10485760" # 10MB
      KAFKA_CFG_LOG_SEGMENT_BYTES: "5242880" # 5MB per segment
      KAFKA_CFG_LOG_RETENTION_CHECK_INTERVAL_MS: "60000" # check every minute
    ports:
      - "9092:9092"
    depends_on:
      - zookeeper
    links:
      - zookeeper
    networks:
      efkk:
        ipv4_address: 172.22.6.12
  kafka-map:
    image: register.hrttest.cn/tools/zhengqing/kafka-map # original image: dushixiang/kafka-map:latest
    container_name: kafka-map # container name: kafka-map
    restart: unless-stopped # restart on exit, except containers already stopped before the Docker daemon starts
    volumes:
      - "./kafka/kafka-map/data:/usr/local/kafka-map/data"
    environment:
      DEFAULT_USERNAME: "admin"
      DEFAULT_PASSWORD: "testtesttest."
    ports: # port mappings
      - "12377:8080"
    depends_on: # control container start order
      - kafka
    networks:
      efkk:
        ipv4_address: 172.22.6.16
  elasticsearch:
    image: registry.cn-hangzhou.aliyuncs.com/zhengqing/elasticsearch:7.14.1 # original image: elasticsearch:7.14.1
    container_name: elasticsearch # container name: elasticsearch
    restart: unless-stopped # restart on exit, except containers already stopped before the Docker daemon starts
    volumes: # map host directories into the container
      - "/etc/localtime:/etc/localtime"
      - "./app/elasticsearch/data:/usr/share/elasticsearch/data"
      - "./app/elasticsearch/logs:/usr/share/elasticsearch/logs"
      - "./app/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml"
      # - "./app/elasticsearch/config/jvm.options:/usr/share/elasticsearch/config/jvm.options"
      - "./app/elasticsearch/plugins/ik:/usr/share/elasticsearch/plugins/ik" # IK Chinese analysis plugin
    environment: # environment variables, equivalent to -e in docker run
      TZ: Asia/Shanghai
      LANG: en_US.UTF-8
      TAKE_FILE_OWNERSHIP: "true"
      discovery.type: single-node
      ES_JAVA_OPTS: "-Xmx1024m -Xms1024m"
      ELASTIC_PASSWORD: "t1izUrGP6uiC7aZBG1m5"
    expose:
      - "9200"
      - "9300"
    networks:
      efkk:
        ipv4_address: 172.22.6.13
  kibana:
    image: registry.cn-hangzhou.aliyuncs.com/zhengqing/kibana:7.14.1 # original image: kibana:7.14.1
    container_name: kibana
    restart: unless-stopped
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "./app/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml"
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
    links:
      - elasticsearch
    networks:
      efkk:
        ipv4_address: 172.22.6.14
  fluentd:
    image: bbowenhhuang/fluentd-es-kafka-plugin:v1.0.0
    container_name: fluentd
    restart: unless-stopped
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "./app/fluentd/conf:/fluentd/etc"
      - "./app/fluentd/log:/fluentd/log"
    depends_on:
      - elasticsearch
      - kafka
    links:
      - "elasticsearch"
      - "kafka"
    expose:
      - "24224"
      - "24224/udp"
    extra_hosts:
      - "kafka:172.22.6.12"
    networks:
      efkk:
        ipv4_address: 172.22.6.15
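The kafka service loads ./.env via env_file; the only variable the compose file itself interpolates is KAFKA_EXTERNAL_HOST, which ends up in the advertised listener. A minimal .env (the address below is a placeholder; use an IP of the docker host that clients can reach):

#cat .env
KAFKA_EXTERNAL_HOST=192.168.1.100

With the configuration files from the following sections in place, start the stack and verify that all six containers are up:

docker-compose up -d
docker-compose ps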
Elasticsearch configuration
#cat elasticsearch.yml
cluster.name: "docker-cluster"
network.host: 0.0.0.0
http.port: 9200
# Enable CORS on ES
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization
# Enable security features
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
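Because xpack.security.enabled is true, every request must be authenticated. The elasticsearch service only uses expose (no host port mapping), so from the docker host it is reached via its fixed network address (172.22.6.13 in the compose file). A quick health check with the password from ELASTIC_PASSWORD:

curl -u elastic:t1izUrGP6uiC7aZBG1m5 'http://172.22.6.13:9200/_cluster/health?pretty'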
Elasticsearch JVM configuration
#cat jvm.options
## JVM configuration
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## You should always set the min and max JVM heap
## size to the same value. For example, to set
## the heap to 4 GB, set:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
##
################################################################
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
-Xms1g
-Xmx1g
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly
## G1GC Configuration
# NOTE: G1 GC is only supported on JDK version 10 or later
# to use G1GC, uncomment the next two lines and update the version on the
# following three lines to your version of the JDK
# 10-13:-XX:-UseConcMarkSweepGC
# 10-13:-XX:-UseCMSInitiatingOccupancyOnly
14-:-XX:+UseG1GC
14-:-XX:G1ReservePercent=25
14-:-XX:InitiatingHeapOccupancyPercent=30
## JVM temporary directory
-Djava.io.tmpdir=${ES_TMPDIR}
## heap dumps
# generate a heap dump when an allocation from the Java heap fails
# heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# specify an alternative path for heap dumps; ensure the directory exists and
# has sufficient space
-XX:HeapDumpPath=data
# specify an alternative path for JVM fatal error logs
-XX:ErrorFile=logs/hs_err_pid%p.log
## JDK 8 GC logging
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:logs/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
# JDK 9+ GC logging
9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m
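Note that this jvm.options is not actually in effect here: its mount line is commented out in the compose file, and the compose file sets ES_JAVA_OPTS to -Xmx1024m -Xms1024m, which should win over the -Xms1g/-Xmx1g defaults above since the JVM honors the last heap flag it sees. To confirm the heap size actually in use:

curl -u elastic:t1izUrGP6uiC7aZBG1m5 'http://172.22.6.13:9200/_nodes/jvm?pretty' | grep heap_max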
Fluentd configuration
#cat fluent.conf
<source>
  @type kafka_group
  brokers kafka:9092
  consumer_group fluentd-log
  topics LOG_APP
  format json
  #message_key <key (Optional, for text format only, default is message)>
  #kafka_message_key <key (Optional, If specified, set kafka's message key to this key)>
  #add_headers <If true, add kafka's message headers to record>
  #add_prefix <tag prefix (Optional)>
  #add_suffix <tag suffix (Optional)>
  retry_emit_limit 10
  time_source kafka
  #use_record_time (Deprecated. Use 'time_source record' instead.) <If true, replace event time with contents of 'time' field of fetched record>
  #time_format %Y-%m-%d %H:%M:%S
  # ruby-kafka consumer options
  max_bytes 1048576
  max_wait_time 3
  min_bytes 10000
  offset_commit_interval 3
  offset_commit_threshold 100
  fetcher_max_queue_size 200
  refresh_topic_interval 300
  start_from_beginning true
</source>
<source>
  @type kafka_group
  brokers kafka:9092
  consumer_group fluentd-log
  topics LOG_INF
  format json
  #message_key <key (Optional, for text format only, default is message)>
  #kafka_message_key <key (Optional, If specified, set kafka's message key to this key)>
  #add_headers <If true, add kafka's message headers to record>
  #add_prefix <tag prefix (Optional)>
  #add_suffix <tag suffix (Optional)>
  retry_emit_limit 10
  time_source kafka
  #use_record_time (Deprecated. Use 'time_source record' instead.) <If true, replace event time with contents of 'time' field of fetched record>
  #time_format %Y-%m-%d %H:%M:%S
  # ruby-kafka consumer options
  max_bytes 1048576
  max_wait_time 3
  min_bytes 10000
  offset_commit_interval 3
  offset_commit_threshold 100
  fetcher_max_queue_size 200
  refresh_topic_interval 300
  start_from_beginning true
</source>
<source>
  @type kafka_group
  brokers kafka:9092
  consumer_group fluentd-log
  topics LOG_SRV
  format json
  #message_key <key (Optional, for text format only, default is message)>
  #kafka_message_key <key (Optional, If specified, set kafka's message key to this key)>
  #add_headers <If true, add kafka's message headers to record>
  #add_prefix <tag prefix (Optional)>
  #add_suffix <tag suffix (Optional)>
  retry_emit_limit 10
  time_source kafka
  #use_record_time (Deprecated. Use 'time_source record' instead.) <If true, replace event time with contents of 'time' field of fetched record>
  #time_format %Y-%m-%d %H:%M:%S
  # ruby-kafka consumer options
  max_bytes 1048576
  max_wait_time 3
  min_bytes 10000
  offset_commit_interval 3
  offset_commit_threshold 100
  fetcher_max_queue_size 200
  refresh_topic_interval 300
  start_from_beginning true
</source>
<match LOG_APP>
  @type elasticsearch
  hosts elasticsearch:9200
  user elastic
  password t1izUrGP6uiC7aZBG1m5
  type_name _doc
  logstash_format true
  logstash_prefix hrt-app
  logstash_prefix_separator -
  logstash_dateformat %Y.%m.%d
  reload_connections true
  reload_on_failure true
  <buffer>
    @type memory
    flush_interval 60s
    chunk_limit_size 8M
    total_limit_size 512M
    overflow_action block
  </buffer>
</match>
<match LOG_INF>
  @type elasticsearch
  hosts elasticsearch:9200
  user elastic
  password t1izUrGP6uiC7aZBG1m5
  type_name _doc
  logstash_format true
  logstash_prefix hrt-inf
  logstash_prefix_separator -
  logstash_dateformat %Y.%m.%d
  reload_connections true
  reload_on_failure true
  <buffer>
    @type memory
    flush_interval 60s
    chunk_limit_size 8M
    total_limit_size 512M
    overflow_action block
  </buffer>
</match>
<match LOG_SRV>
  @type elasticsearch
  hosts elasticsearch:9200
  user elastic
  password t1izUrGP6uiC7aZBG1m5
  type_name _doc
  logstash_format true
  logstash_prefix hrt-srv
  logstash_prefix_separator -
  logstash_dateformat %Y.%m.%d
  reload_connections true
  reload_on_failure true
  <buffer>
    @type memory
    flush_interval 60s
    chunk_limit_size 8M
    total_limit_size 512M
    overflow_action block
  </buffer>
</match>
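To smoke-test the pipeline end to end, produce a JSON record to one of the consumed topics and wait for the 60s buffer flush; the index name is the logstash_prefix plus the date. Assuming the Kafka image is Bitnami-based (the ALLOW_PLAINTEXT_LISTENER variable suggests it), the console scripts are already on the container's PATH:

# produce one test record to LOG_APP
echo '{"message":"hello efk","level":"info"}' | docker exec -i kafka kafka-console-producer.sh --bootstrap-server localhost:9092 --topic LOG_APP
# after the flush interval, a daily index such as hrt-app-2026.01.05 should exist
curl -u elastic:t1izUrGP6uiC7aZBG1m5 'http://172.22.6.13:9200/_cat/indices/hrt-app-*?v'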
Kibana configuration
#cat kibana.yml
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.name: kibana
server.host: "0.0.0.0"
server.publicBaseUrl: "http://kibana:5601"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.username: "elastic"
elasticsearch.password: "t1izUrGP6uiC7aZBG1m5"
i18n.locale: zh-CN
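Kibana is published on host port 5601 in the compose file, so it can be checked directly from the host; once its status is green, log in with the elastic credentials and create index patterns for hrt-app-*, hrt-inf-* and hrt-srv-* to browse the logs:

curl -s -u elastic:t1izUrGP6uiC7aZBG1m5 http://localhost:5601/api/status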
Author: Ops Technical Team: 辣个男人Devin
Published: 2026-01-05
Applies to: Linux