Elastic Log Analysis (Part 10): Log Collection with log-pilot + ELK

2018-09-19  by 木夕月_fc7b

Cluster-mode installation

Internal services

Installing log-pilot

Official reference:

https://github.com/AliyunContainerService/log-pilot

Start the container:

docker run -itd \
    --name log-pilot \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /:/host:ro \
    -e PILOT_TYPE=filebeat \
    -e LOGGING_OUTPUT=logstash \
    -e LOGSTASH_HOST=localhost \
    -e LOGSTASH_PORT=5044 \
    --privileged \
    --restart=always \
    --net=host \
    registry.cn-hangzhou.aliyuncs.com/acs-sample/log-pilot:latest
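
If the container comes up, its log should show filebeat starting and log-pilot beginning to watch for containers; a quick sanity check (not part of the original post):

docker logs -f log-pilot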

Usage example:

docker run -it --rm -p 10080:8080 \
    -v /usr/local/tomcat/logs \
    --label aliyun.logs.catalina=stdout \
    --label aliyun.logs.access=/usr/local/tomcat/logs/localhost_access_log.*.txt \
    tomcat

What the labels mean

When starting Tomcat, we declared the two labels below; they tell log-pilot where this container's logs are located.

--label aliyun.logs.catalina=stdout 
--label aliyun.logs.access=/usr/local/tomcat/logs/localhost_access_log.*.txt

You can add more labels of the same form to any application container:

aliyun.logs.$name = $path
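
Here $name is the name under which log-pilot ships the stream, and $path is either stdout (the container's standard output) or a log file path, which may contain wildcards. log-pilot also understands auxiliary labels such as aliyun.logs.$name.tags, which attaches extra key=value fields to every event; for example (the tag values here are illustrative):

--label aliyun.logs.access=/usr/local/tomcat/logs/localhost_access_log.*.txt
--label aliyun.logs.access.tags="from=tomcat,target=tomcat_access_log"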

Installing ELK

docker-compose.yml

version: '3.6'

volumes:
  esdata:
    driver: local
#  beatdata:
#    driver: local
networks:
  esnet:
    driver: overlay
#    attachable: true
configs:
  logstash_conf:
    file: ./logstash/logstash.conf
  kibana_config:
    file: ./kibana/kibana.yml
  es_proxy_config:
    file: ./es_proxy/nginx.conf
#  filebeat_config:
#    file: ./filebeat/filebeat.yml
services:
  elasticsearch:
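    # One Elasticsearch instance runs on every node labeled
    # elasticsearch=elasticsearch (mode: global below); endpoint_mode dnsrr
    # lets the instances resolve each other by service name for zen discovery.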
    image: hub.c.163.com/muxiyue/elasticsearch:6.4.0
    #hostname: elasticsearch
    environment:
      - "cluster.name=es-cluster"
      - "bootstrap.memory_lock=true"
      - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
      - "network.host=0.0.0.0"
      - "discovery.zen.minimum_master_nodes=2"
      - "discovery.zen.ping.unicast.hosts=elasticsearch"
      - "ELASTIC_PASSWORD=elastic"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      - esnet
    volumes:
      - esdata:/usr/share/elasticsearch/data
      - /etc/localtime:/etc/localtime:ro
    deploy:
      mode: global
      placement:
        constraints:
          - node.labels.elasticsearch == elasticsearch
      restart_policy:
        condition: on-failure
      endpoint_mode: dnsrr
#    ports:
#      - "9200:9200"
#      - "9300:9300"

  logstash:
    image: hub.c.163.com/muxiyue/logstash:6.4.0
    hostname: logstash
    environment:
      - "xpack.monitoring.elasticsearch.url=http://elasticsearch:9200"
      - "xpack.monitoring.enabled=true"
      - "xpack.monitoring.elasticsearch.username=elastic"
      - "xpack.monitoring.elasticsearch.password=elastic"
      - "LS_JAVA_OPTS=-Xmx2g"
    volumes:
      - /etc/localtime:/etc/localtime:ro
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4096M
      placement:
        constraints:
          - node.labels.logstash == logstash
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
    ports:
      - 5044:5044
    networks:
      - esnet
    configs:
      - source: logstash_conf
        target: /usr/share/logstash/pipeline/logstash.conf
    depends_on:
      - elasticsearch

  kibana:
    image: hub.c.163.com/muxiyue/kibana:6.4.0
    hostname: kibana
    environment:
      - "ELASTICSEARCH_URL=http://elasticsearch:9200"
    ports:
      - "5601:5601"
    volumes:
      - /etc/localtime:/etc/localtime:ro
    deploy:
      placement:
        constraints:
           - node.role == manager
           - node.labels.kibana == kibana
      restart_policy:
        condition: on-failure
    depends_on:
      - elasticsearch
    networks:
      - esnet
    configs:
      - source: kibana_config
        target: /usr/share/kibana/config/kibana.yml

  es_proxy:
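    # Publishes Elasticsearch on host port 9200. The elasticsearch service
    # uses endpoint_mode dnsrr and therefore cannot publish ports itself,
    # so this nginx proxy is the external entry point to the cluster.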
    image: hub.c.163.com/library/nginx:1.13.0
    ports:
      - "9200:80"
    depends_on:
      - elasticsearch
    networks:
      - esnet
    volumes:
      - /etc/localtime:/etc/localtime:ro
    deploy:
      replicas: 1
      resources:
        limits:
          cpus: '1'
          memory: 1024M
      update_config:
        parallelism: 1
        delay: 5s
      placement:
        constraints:
          - node.role != manager
      restart_policy:
        condition: on-failure
    configs:
      - source: es_proxy_config
        target: /etc/nginx/nginx.conf
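
The stack mounts three config files (logstash/logstash.conf, kibana/kibana.yml, es_proxy/nginx.conf) that are not listed in this post. For reference, minimal sketches of logstash.conf and nginx.conf that would be consistent with the ports and credentials used above; these are illustrative, not the author's actual files.

logstash/logstash.conf:

input {
  # log-pilot runs filebeat internally and ships to this beats endpoint
  beats {
    port => 5044
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    user => "elastic"
    password => "elastic"
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
  }
}

es_proxy/nginx.conf:

user nginx;
worker_processes 1;
events { worker_connections 1024; }
http {
  server {
    listen 80;
    location / {
      # forward everything to the dnsrr-resolved elasticsearch service
      proxy_pass http://elasticsearch:9200;
    }
  }
}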

Preparation

1. Increase vm.max_map_count

On a plain (non-Docker) host:
sysctl -w vm.max_map_count=262144
echo -e 'vm.max_map_count=262144' >> /etc/sysctl.conf
sysctl -p
more /proc/sys/vm/max_map_count
Swarm cluster mode: vm.max_map_count is a kernel setting, so it must still be applied on every node as above. In addition, for bootstrap.memory_lock=true to work inside the containers, the Docker daemon itself must be allowed to lock memory, so raise the daemon's limits in its systemd unit:

vi /usr/lib/systemd/system/docker.service

ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
LimitMEMLOCK=infinity

systemctl daemon-reload
service docker restart

2. Raise the resource limits

echo '* soft nproc 65536 
* hard nproc 65536
* soft nofile 65536 
* hard nofile 65536 
* soft memlock unlimited 
* hard memlock unlimited' >> /etc/security/limits.conf
cat  /etc/security/limits.conf

3. Disable swap, or keep its use to a minimum

Disable swap:
sudo swapoff -a   # to disable swap permanently, also remove its entry from /etc/fstab

## show partition information
sfdisk -l
## re-enable swap if needed
swapon -a

Minimize swap usage:
sudo sysctl vm.swappiness=0
echo -e 'vm.swappiness=0' >> /etc/sysctl.conf
sysctl -p
more /proc/sys/vm/swappiness
4. Label the nodes so that the placement constraints above can schedule each service:
docker node update --label-add elk=elk --label-add logstash=logstash node146
docker node update --label-add elk=elk --label-add elasticsearch=elasticsearch node136
docker node update --label-add elk=elk --label-add elasticsearch=elasticsearch node137
docker node update --label-add elk=elk --label-add elasticsearch=elasticsearch node135
docker node update --label-add elk=elk --label-add kibana=kibana node191
docker node update --label-add elk=elk --label-add apmserver=apmserver node190

Deploy

docker stack deploy -c /root/elk/docker-compose.yml elk
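
Once deployed, each service should converge to its desired replica count; a quick check (not in the original post):

docker stack services elk
docker service ps elk_elasticsearch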

Test

Create a service whose logs should be collected:

docker service create --name tomcat-logs-test --replicas=2 \
    --publish 10080:8080 \
    --mount type=volume,destination=/usr/local/tomcat/logs \
    --container-label aliyun.logs.catalina=stdout \
    --container-label aliyun.logs.access=/usr/local/tomcat/logs/localhost_access_log.*.txt \
    --container-label aliyun.logs.access.tags="from=tomcat,target=tomcat_access_log" \
    tomcat
Generate some access-log entries by visiting port 10080:
http://xx.xx.xx.xx:10080

Then open Kibana:
http://xx.xx.xx.xx:5601
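
One step the post leaves implicit: in Kibana 6.x the collected events only show up in Discover after you create a matching index pattern under Management > Index Patterns; the pattern to use depends on the index names written by your logstash.conf (for example filebeat-* with the sketch above).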