Docker (8): Installing ELK

Evolution of Service Deployment

At first, the application is deployed on a single node, and its logs are written to the storage of that same physical machine.

With a distributed, clustered deployment, the application runs on several different physical machines, and each instance writes its logs to the machine it is deployed on.

When the application is deployed as Docker containers on a Kubernetes (K8S) platform, its logs end up inside the individual Pods. In every one of these setups the logs are scattered across nodes, which is why a centralized logging stack such as ELK is useful.

System Architecture

Elasticsearch: a distributed search and analytics engine that aggregates and enriches your data and stores it. In this stack, Elasticsearch stores the logs and serves search queries.

Logstash: an open-source data collection engine with real-time pipelining capabilities that can dynamically unify data from different sources. Here, Logstash receives the logs, processes them, and sends them to Elasticsearch for storage.

Kibana: an open-source analytics and visualization platform; it provides the UI.

Filebeat: a lightweight shipper for forwarding and centralizing log data. It collects the logs you specify and forwards them to a destination of your choice; in this setup it collects the application logs and ships them to Logstash.

Putting it together, the data flows: application log files → Filebeat → Logstash → Elasticsearch, with Kibana as the front end.

Deploying ELK

All four services are defined in one docker-compose.yml:

version: '2'
services:
  elasticsearch:
    container_name: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.1
    ports:
      - "9200:9200"
    volumes:
      - /mydata/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /mydata/elasticsearch/data:/usr/share/elasticsearch/data
      - /mydata/elasticsearch/plugins:/usr/share/elasticsearch/plugins
    environment:
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
      - "discovery.type=single-node"
      - "COMPOSE_PROJECT_NAME=elasticsearch-server"
    restart: 'no'

  kibana:
    depends_on:
      - elasticsearch
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:7.8.1
    ports:
      - "5601:5601"
    restart: 'no'
    environment:
      - ELASTICSEARCH_HOSTS=http://192.168.1.20:9200

  filebeat:
    container_name: filebeat
    image: docker.elastic.co/beats/filebeat:7.8.1
    user: root
    volumes:
      - /home/chinda/log:/var/log
      - /mydata/filebeat/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    command: filebeat -e -strict.perms=false

  logstash:
    container_name: logstash
    image: docker.elastic.co/logstash/logstash:7.8.1
    ports:
      - 5044:5044
    restart: 'no'
    volumes:
      - /mydata/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - /mydata/logstash/settings/logstash.yml:/usr/share/logstash/config/logstash.yml
    command: bin/logstash --config.reload.automatic --http.port 9600

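The compose file mounts an elasticsearch.yml and a logstash.yml from the host, but their contents are not shown in the post. A minimal sketch of what they could contain for this single-node setup (assumptions, not the author's actual files):

/mydata/elasticsearch/config/elasticsearch.yml:

# single-node discovery is already set through discovery.type in the compose file
cluster.name: "elasticsearch"
network.host: 0.0.0.0

/mydata/logstash/settings/logstash.yml:

http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.1.20:9200" ]

Create the /mydata/... directories on the host before starting (the Elasticsearch container runs as uid 1000 and must be able to write to /mydata/elasticsearch/data), then bring the stack up with docker-compose up -d.
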
Filebeat configuration, the host file /mydata/filebeat/filebeat.docker.yml mounted into the container as /usr/share/filebeat/filebeat.yml:

filebeat.inputs:
  - type: log
    # Lines that do not start with a date (stack traces, continuation lines)
    # are appended to the preceding line, so one log entry stays one event.
    multiline:
      pattern: '^\d{4}-\d{2}-\d{2}'
      negate: true
      match: after
    tags: ['chinda']
    # Extra field attached to every event, useful for filtering in Kibana.
    fields:
      app_id: chinda_app
    # Drop debug lines.
    exclude_lines: ['^DBG']
    # /var/log inside the container is the host's /home/chinda/log (see the compose file).
    paths:
      - /var/log/*/*.log

# Ship events to Logstash rather than directly to Elasticsearch.
output.logstash:
  hosts: ["192.168.1.20:5044"]
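With the multiline settings above, a log entry that spans several lines, such as this hypothetical exception, is shipped as one event rather than three, because only its first line matches the ^\d{4}-\d{2}-\d{2} pattern:

2020-10-13 15:01:02.123 ERROR 25810 --- [main] com.example.Demo : query failed
java.sql.SQLException: connection closed
    at com.example.Demo.run(Demo.java:42)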

Logstash pipeline configuration, the host file /mydata/logstash/pipeline/logstash.conf mounted as /usr/share/logstash/pipeline/logstash.conf:

# The # character at the beginning of a line indicates a comment. Use
# comments to describe your configuration.
input {
    beats {
        port => "5044"
    }
}
# Parse each line with grok; the pattern matches the default Spring Boot log
# format (see the sample line in the note below), and the date filter then
# uses the parsed timestamp as the event's @timestamp.
filter {
    grok {
        match => { "message" => "%{TIMESTAMP_ISO8601:log_date}\s*%{LOGLEVEL:log_level}\s*%{POSINT}\s*---\s*\[%{GREEDYDATA}\]\s*%{JAVAFILE:log_class}(.*?[:])\s*(?<log_content>.*$)" }
    }

    date {
        timezone => "Asia/Shanghai"
        match => [ "log_date", "yyyy-MM-dd HH:mm:ss.SSS" ]
    }
}

output {
    elasticsearch {
        hosts => [ "192.168.1.20:9200" ]
        index => "chinda_index"
    }
}

Note: the grok pattern above matches log lines in the default Spring Boot format, for example: 2020-10-13 14:58:26.801 WARN 25810 --- [o-auto-1-exec-7] com.zaxxer.hikari.pool.PoolBase : HikariPool-1 - Failed to validate connection com.mysql.cj.jdbc.ConnectionImpl@2512a45f (No operations allowed after connection closed.). Possibly consider using a shorter maxLifetime value.
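Worked through by hand against that sample line, the grok pattern should yield roughly the following fields (the unnamed %{POSINT} and %{GREEDYDATA} sub-patterns match the process id 25810 and the thread name o-auto-1-exec-7 but are not stored as fields, and log_class may keep some trailing whitespace):

log_date: "2020-10-13 14:58:26.801"
log_level: "WARN"
log_class: "com.zaxxer.hikari.pool.PoolBase"
log_content: "HikariPool-1 - Failed to validate connection com.mysql.cj.jdbc.ConnectionImpl@2512a45f (No operations allowed after connection closed.). Possibly consider using a shorter maxLifetime value."

The date filter then parses log_date with the yyyy-MM-dd HH:mm:ss.SSS pattern in the Asia/Shanghai timezone and writes it to @timestamp, so events in Kibana are ordered by the time they were logged rather than the time they were ingested.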