0
点赞
收藏
分享

微信扫一扫

Docker搭建ELK日志分析系统

最近公司要求搭建ELK日志系统将日志维护起来,网上看没有几个能直接跑起来的,踩了挺多坑,这里简单分享下配置

版本号

工具 | 版本号
elasticsearch | 7.16.1
logstash | 7.16.1
kibana | 7.16.1
filebeat | 7.16.1
1. Filebeat

filebeat.yml:(定义filebeat配置文件)

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /你项目的路径/*.log
  scan_frequency: 10s  # how often Filebeat checks the paths for new files/lines
  # Multiline handling: any line that does NOT start a new log entry
  # (i.e. does not begin with "[INFO" / "[ERROR" / "[WARN") is appended
  # to the previous line -- keeps stack traces as one event.
  multiline.type: pattern
  # BUGFIX: the original pattern '^\[[INFO|ERROR|WARN]' used a character
  # class, which matches "[" followed by ANY single character of
  # I,N,F,O,E,R,W,A or "|". Alternation needs a group instead:
  multiline.pattern: '^\[(INFO|ERROR|WARN)'
  multiline.negate: true
  multiline.match: after
  #tags: ["logapp"]
  # Custom field forwarded with every event; logstash.conf uses it as
  # the Elasticsearch index name (%{[fields][index]}).
  fields:
    index: "dispatcher"
  # false: read each file from the beginning on first pickup.
  # true: start at end-of-file and only ship lines appended afterwards.
  tail_files: false
#============================= Filebeat modules ===============================
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: true

output.logstash:
  hosts: ["IP:5044"]  # IP = address of the server running Logstash
  enabled: true
2. Logstash

logstash.yml:(定义logstash配置文件)

# Logstash node settings (pipeline definitions live in conf.d/*.conf).
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://es01:9200" ]
xpack.monitoring.enabled: false
# credentials -- uncomment when X-Pack security is enabled on the cluster
# xpack.monitoring.elasticsearch.username: "elastic"
# xpack.monitoring.elasticsearch.password: "123123"


# Where to find pipeline configs and where to write Logstash's own logs.
path.config: /usr/share/logstash/config/conf.d/*.conf
path.logs: /usr/share/logstash/logs

# Do not interpret escape sequences such as \n inside config strings.
config.support_escapes: false

conf.d目录下新建logstash.conf:(定义过滤管道)

# Pipeline: receive events from Filebeat, extract fields with grok,
# then ship the structured documents to Elasticsearch.
input {
   beats {
     port  =>  "5044"
   }
}

# Each grok below extracts one (or a few) fields from the raw "message".
# A non-matching grok only tags the event with _grokparsefailure, and
# "tags" is removed in the mutate step, so every extraction is optional.
filter { 
    # Log header: "[LEVEL] timestamp package)<thread>"
    grok{ 
     match => { 
         "message"=>"\[%{LOGLEVEL:Level}\] %{TIMESTAMP_ISO8601:Timestamp} %{DATA:PackageName}\)<%{DATA:Thread}>" 
                 }
    }   
    # First line after the header (the free-text body) -> Test
    grok{ 
     match => { 
         "message"=>">\n(?<Test>.*?)\n" 
                 }
    }   
    # Java exception class name, e.g. org.foo.BarException
    grok{ 
     match => { 
          "message"=>".*(?<Exception>org\S+?Exception)" 
                   }
    }   
    # Business fields embedded in the message (query-string style)
    grok{ 
     match => { 
         "message"=>".*CallNo=(?<CallNo>\w+)" 
             }
    }   
    grok{ 
     match => { 
                   "message"=>".*CallSheetID=(?<CallSheetID>\S+?)&" 
                   }
    }     
    grok{ 
     match => { 
                  "message"=>".*CalledNo=(?<CalledNo>\w+)" 
          }
    }    
    # Use the log's own Timestamp as the event @timestamp.
    date {
    match => [ "Timestamp", "yyyy-MM-dd HH:mm:ss,SSS" ]
   }
    # Promote beat metadata to top-level fields, then drop noise fields
    # (Timestamp is dropped because date{} already copied it to @timestamp).
    mutate{
              replace => ["Hostname","%{[agent][hostname]}"]
              replace => ["FilePath","%{[log][file][path]}"]
             remove_field => ['host','ecs','@version','Timestamp','log','agent','input','tags','message']      
    }
}
    
output {
    # Echo each event to the container log -- handy for debugging.
    stdout {codec => rubydebug}
    elasticsearch {
            hosts => [ "IP:9200" ]
            # Index name comes from fields.index set in filebeat.yml.
            index => "%{[fields][index]}"
            manage_template => true
            template=>"/usr/share/logstash/templates/dispatcher_template.json"
            template_name=>"dispatcher_template"
            template_overwrite=>true
            # credentials -- uncomment when X-Pack security is enabled
#             user => "elastic"
#             password => "123123"
        }
}

templates下新建dispatcher_template.json:(定义logstash静态模版)

{
  "order": 10,
  "index_patterns": ["dispatcher*"],
  "settings": {
    "index": {
      "refresh_interval": "60s",
      "number_of_shards": "5",
      "store": {
        "type": "fs"
      },
      "number_of_replicas": "0"
    }
  },
  "mappings": {
    "dynamic": "strict",
    "properties": {
      "@timestamp": {
        "format": "yyyy-MM-dd HH:mm:ss,SSS||yyyy-MM-dd||epoch_millis",
        "type": "date"
      },
      "Hostname": {
        "store": true,
        "type": "completion"
      },
      "FilePath": {
        "store": true,
        "type": "keyword"
      },
      "Level": {
        "store": true,
        "type": "keyword"
      },
      "CallSheetID": {
        "store": true,
        "type": "keyword"
      },
      "CallNo": {
        "store": true,
        "type": "keyword"
      },
      "CalledNo": {
        "store": true,
        "type": "keyword"
      },
      "PackageName": {
        "store": true,
        "type": "keyword"
      },
      "Thread": {
        "store": true,
        "type": "keyword"
      },
      "Exception": {
        "store": true,
        "type": "completion"
      },
      "Test": {
        "search_analyzer": "ik_smart",
        "analyzer": "ik_max_word",
        "store": true,
        "type": "text"
      }
    }
  }
}
3. Elasticsearch

conf下新建elasticsearch.yml:(定义es配置文件)

---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "es-docker-cluster"
network.host: 0.0.0.0

## X-Pack settings -- set to true (and uncomment below) to require authentication
xpack.security.enabled: false
# xpack.security.transport.ssl.enabled: true  
# xpack.license.self_generated.type: basic

# CORS: allow browser-based tools (e.g. elasticsearch-head) to query the cluster
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type

docker-compose.yml:(编排容器-单体)

version: '3.7'
services:
  # Single-node Elasticsearch; config, data and plugins are bind-mounted
  # so they survive container recreation.
  es01:
    image: elasticsearch:7.16.1
    container_name: es01
    volumes:
      - /你的地址/es/conf/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /你的地址/es/node01/data:/usr/share/elasticsearch/data
      - /你的地址/es/plugins:/usr/share/elasticsearch/plugins
    ports:
      - "9200:9200"
    environment:
      - discovery.type=single-node # single-node discovery: skip cluster formation
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      # allow memory locking (pairs with bootstrap.memory_lock=true)
      memlock:
        soft: -1
        hard: -1
    networks:
      - elastic

  # Kibana UI on port 5601; started after Logstash and Elasticsearch.
  kibana:
    image: kibana:7.16.1
    container_name: kibana_client
    volumes:
      - /你的地址/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml:rw
    ports:
      - "5601:5601"
    networks:
      - elastic
    depends_on:
      - logstash
      - es01
  # Logstash: 5044 receives Filebeat events, 9600 is the monitoring API.
  logstash:
    image: logstash:7.16.1
    container_name: logstash
    command: logstash -f /usr/share/logstash/config/conf.d/logstash.conf
    ports:
      - "9600:9600"
      - "5044:5044"
    volumes:
      - /你的地址/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /你的地址/logstash/config/conf.d:/usr/share/logstash/config/conf.d
      - /你的地址/logstash/config/templates:/usr/share/logstash/templates
    networks:
      - elastic
    depends_on:
      - es01

networks:
  elastic:
    driver: bridge

然后执行 docker-compose up 启动即可

举报

相关推荐

0 条评论