别着急,坐和放宽
使用社交账号登录
EFK 是一套完整的日志收集、存储、分析和可视化解决方案:
应用程序 → 日志文件 → Filebeat → Elasticsearch → Kibana
# Create the project directory and switch into it
mkdir efk-stack && cd efk-stack
# Data directories for Elasticsearch and Kibana, plus the shared application-log directory
mkdir -p es-data kibana-data logs
创建 docker-compose.yml 文件:
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:9.2.2
    container_name: elasticsearch
    environment:
      # Single-node mode: skips master election / cluster bootstrap checks
      - discovery.type=single-node
      # Lock the JVM heap in RAM (requires the memlock ulimits below)
      - bootstrap.memory_lock=true
      # Fixed heap size; keep this well below the host's available RAM
      - ES_JAVA_OPTS=-Xms2g -Xmx2g
      # Security disabled for local development only — do NOT use in production
      - xpack.security.enabled=false
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      # Persist index data across container restarts
      - ./es-data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
    networks:
      - efk
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

  kibana:
    image: docker.elastic.co/kibana/kibana:9.2.2
    container_name: kibana
    environment:
      # Reach Elasticsearch via the compose service name on the efk network
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - SERVER_NAME=kibana
      # Chinese UI locale
      - I18N_LOCALE=zh-CN
    ports:
      - "5601:5601"
    volumes:
      - ./kibana-data:/usr/share/kibana/data
    networks:
      - efk
    depends_on:
      # Wait for the ES healthcheck, not just container start
      elasticsearch:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5601/api/status || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5

  filebeat:
    image: docker.elastic.co/beats/filebeat:9.2.2
    container_name: filebeat
    # Root is needed to read docker container logs and the docker socket
    user: root
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - KIBANA_HOST=http://kibana:5601
    volumes:
      # Filebeat configuration (read-only)
      - ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      # Application logs collected by the filestream inputs
      - ./logs:/var/log/app:ro
      # Registry data — remembers read offsets across restarts
      - filebeat-data:/usr/share/filebeat/data
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    # --strict.perms=false: skip the config-file ownership/permission check
    command: filebeat -e --strict.perms=false
    networks:
      - efk
    depends_on:
      elasticsearch:
        condition: service_healthy
      kibana:
        condition: service_healthy
    restart: unless-stopped

networks:
  efk:
    driver: bridge

volumes:
  filebeat-data:
    driver: local
应用程序的日志也需要映射到./logs
创建 filebeat.yml 文件:
你也可以把 filebeat 集成到你的应用程序容器中,然后为每个 app 配置独立的 filebeat 配置文件,这样就可以实现日志的独立收集和存储。
如下:
FROM golang:1.23.10-alpine

# Install required packages
RUN apk update && apk add --no-cache libc6-compat curl jq

# easyExcel export missing font
RUN apk add --update ttf-dejavu fontconfig && rm -rf /var/cache/apk/*

# Download and install Filebeat.
# Pinned to 9.2.2 to match the Elasticsearch/Kibana images in the compose file —
# mixing Beats and Elasticsearch major versions can break configs and index templates.
ARG FILEBEAT_VERSION=9.2.2
RUN curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz \
    && tar xzvf filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz \
    && mv filebeat-${FILEBEAT_VERSION}-linux-x86_64 /usr/share/filebeat \
    && rm -f filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz
注意:跨版本使用可能会导致配置不兼容,建议使用统一版本。
# Fix ownership of the Elasticsearch and Kibana data directories
# (the official Elastic images run as UID/GID 1000 inside the container)
sudo chown -R 1000:1000 es-data kibana-data
# Make sure the log directory is readable by the Filebeat container
chmod 755 logs
打开浏览器访问:http://localhost:5601

请确保应用日志写到./logs目录下,或者你也可以在 docker-compose.yml 中修改日志路径。

ELK App Log
---
filebeat.inputs:
  # Application log input: app1 (production)
  - type: filestream
    id: app1  # every filestream input requires a unique id
    enabled: true
    paths:
      - /var/log/app/app1/*.log
    parsers:
      # Merge continuation lines (stack traces etc.) into the preceding event:
      # any line NOT starting with a date (2025-01-01) or '{' is appended to
      # the previous matching line.
      - multiline:
          type: pattern
          pattern: '^(\d{4}-\d{2}-\d{2}|{)'
          negate: true
          match: after
      # Decode JSON log lines; non-JSON lines pass through as plain messages.
      # target: "" places decoded keys at the event root — this replaces the
      # old log-input option `keys_under_root`, which the filestream ndjson
      # parser does not support.
      - ndjson:
          target: ""
          add_error_key: true
          overwrite_keys: true
          ignore_decoding_error: true
    fields:
      # Custom fields used to distinguish different log sources
      log_type: application
      app_name: app1
      environment: production
    fields_under_root: true

  # Application log input: app2 (development)
  - type: filestream
    id: app2  # every filestream input requires a unique id
    enabled: true
    paths:
      - /var/log/app/app2/*.log
    parsers:
      - multiline:
          type: pattern
          pattern: '^(\d{4}-\d{2}-\d{2}|{)'
          negate: true
          match: after
      - ndjson:
          target: ""
          add_error_key: true
          overwrite_keys: true
          ignore_decoding_error: true
    fields:
      log_type: application
      app_name: app2
      environment: development
    fields_under_root: true

# Elasticsearch output
output.elasticsearch:
  hosts: ["${ELASTICSEARCH_HOSTS}"]
  # NOTE: while setup.ilm.enabled is true, ILM controls the write alias and
  # this custom index name is ignored; disable ILM if you need it to apply.
  index: "filebeat-%{[agent.version]}-%{+yyyy.MM.dd}"

setup.kibana:
  host: "${KIBANA_HOST}"

setup.dashboards.enabled: true
setup.template.enabled: true
setup.template.name: "filebeat"
setup.template.pattern: "filebeat-*"
setup.ilm.enabled: true

logging.level: info
logging.to_files: false
logging.to_stderr: true
# Start all services in the background
docker-compose up -d
# Check service status
docker-compose ps
# Check that Elasticsearch is responding
curl http://localhost:9200
# Check Elasticsearch cluster health
curl http://localhost:9200/_cluster/health?pretty
# List indices to confirm Filebeat is shipping data
curl http://localhost:9200/_cat/indices?v