2.5-ES的docker-compose安装配置(ES版本7.2)

2020-04-05  本文已影响0人  落日彼岸

一个集群两个节点

# Single server, two ES nodes — container names must be unique.
# The compose project name defaults to the parent directory name.
version: '2.2'
services:
  cerebro:
    image: lmenezes/cerebro:0.8.3
    container_name: cerebro
    ports:
      - "9000:9000"
    command:
      - -Dhosts.0.host=http://elasticsearch:9200
    networks:
      - es72net
  kibana:
    image: docker.elastic.co/kibana/kibana:7.2.0
    container_name: kibana72
    environment:
      - I18N_LOCALE=zh-CN
      - XPACK_GRAPH_ENABLED=true
      - TIMELION_ENABLED=true
      # Was ="true": in list-style env entries the quotes are part of the
      # value, so Kibana received the literal string "true" (with quotes).
      - XPACK_MONITORING_COLLECTION_ENABLED=true
    ports:
      - "5601:5601"
    networks:
      - es72net
  # Kibana's default elasticsearch host is http://elasticsearch:9200, so this
  # service name must stay "elasticsearch".
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
    container_name: es72_01
    environment:
      - cluster.name=es-cluster
      - node.name=es72_01
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - discovery.seed_hosts=es72_01,es72_02
      - cluster.initial_master_nodes=es72_01,es72_02
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes: # mount local files
      - es72data1:/usr/share/elasticsearch/data
      - ~/docker-es-7.2:/home/docker-es
    ports:
      # Quoted to avoid YAML's sexagesimal/implicit-typing traps on port maps.
      - "9200:9200"
    networks:
      - es72net
  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
    container_name: es72_02
    environment:
      - cluster.name=es-cluster
      - node.name=es72_02
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - discovery.seed_hosts=es72_01,es72_02
      - cluster.initial_master_nodes=es72_01,es72_02
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes: # mount local files
      - es72data2:/usr/share/elasticsearch/data
      - ~/docker-es-7.2:/home/docker-es
    networks:
      - es72net


volumes:
  es72data1:
    driver: local
  es72data2:
    driver: local

networks:
  es72net:
    driver: bridge

一个集群三个节点

├── README
├── es72data1                    es1数据目录
├── es72data2                    es2数据目录
├── config                       配置目录
│   ├── es.yml                   es配置文件
│   ├── es1.yml                  es1配置文件
│   ├── es2.yml                  es2配置文件
└── docker-compose.yaml          docker-compose配置文件
# ES cluster — container names must be unique.
# The compose project name defaults to the parent directory name.
version: '2.2'
services:
  cerebro:
    image: lmenezes/cerebro:0.8.3
    container_name: cerebro-cluster
    ports:
      - "9000:9000"
    command:
      - -Dhosts.0.host=http://elasticsearch:9200 # must match the master ES service name
    networks:
      - es72net
  kibana:
    image: docker.elastic.co/kibana/kibana:7.2.0
    container_name: kibana72-cluster
    environment:
      # Kibana 7.x reads ELASTICSEARCH_HOSTS; ELASTISEARCH_URL was the 6.x
      # variable and is ignored by 7.x — which is why editing it previously
      # appeared to have no effect.
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - I18N_LOCALE=zh-CN
      - XPACK_GRAPH_ENABLED=true
      - TIMELION_ENABLED=true
      # Was ="true": in list-style env entries the quotes are part of the
      # value, so Kibana received the literal string "true" (with quotes).
      - XPACK_MONITORING_COLLECTION_ENABLED=true
    ports:
      - "5601:5601"
    networks:
      - es72net
  elasticsearch: # for local testing the master service must be named "elasticsearch", otherwise Kibana cannot reach http://elasticsearch:9200
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
    container_name: es72_00-cluster
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    cap_add:
      - IPC_LOCK
    volumes: # mount local files
      # - ./logs/es1/:/usr/share/elasticsearch/logs/:rw # optionally mount logs for easier inspection
      # - ./es72data1:/usr/share/elasticsearch/data # likewise the data directory can be mounted
      - ./config/es.yml:/usr/share/elasticsearch/config/elasticsearch.yml # mount the node config file
      - ~/docker-es-7.2:/home/docker-es
    ports:
      # Quoted to avoid YAML's implicit-typing traps on port mappings.
      - "9200:9200"
      - "9300:9300"
    networks:
      - es72net
  es72_01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
    container_name: es72_01-cluster
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    cap_add:
      - IPC_LOCK
    volumes: # mount local files
      # - ./logs/es1/:/usr/share/elasticsearch/logs/:rw # optionally mount logs for easier inspection
      # - ./es72data1:/usr/share/elasticsearch/data # likewise the data directory can be mounted
      - ./config/es1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ~/docker-es-7.2:/home/docker-es
    ports:
      - "9201:9200"
      - "9301:9300"
    networks:
      - es72net
  es72_02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
    container_name: es72_02-cluster
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    cap_add:
      - IPC_LOCK
    volumes: # mount local files
      # - ./logs/es2/:/usr/share/elasticsearch/logs/:rw # optionally mount logs for easier inspection
      # - ./es72data2:/usr/share/elasticsearch/data # likewise the data directory can be mounted
      - ./config/es2.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ~/docker-es-7.2:/home/docker-es
    ports:
      - "9202:9200"
      - "9302:9300"
    # "links" is legacy; services on the same user-defined network already
    # resolve each other by name. depends_on keeps the start ordering.
    depends_on:
      - es72_01
    networks:
      - es72net

# NOTE(review): these named volumes are only used if the commented-out data
# mounts above are re-enabled.
volumes:
  es72data1:
    driver: local
  es72data2:
    driver: local

networks:
  es72net:
    driver: bridge
# es.yml — config for node es72_00; node/host names here must match the
# service/container names in docker-compose.yaml.
cluster.name: elk-cluster
node.name: es72_00-cluster
node.master: true
node.data: true

network.host: es72_00-cluster
# Port note: ports on one machine cannot be shared. ES's default HTTP port is
# 9200; if taken, it increments within 9200-9300. The default transport (TCP)
# port is 9300, incrementing within 9300-9400. So a few nodes on a single
# machine can also run without explicit port settings.
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"


discovery.seed_hosts: ["es72_00-cluster:9300", "es72_01-cluster:9300", "es72_02-cluster:9300"] # used for cluster discovery; any set of hosts that can (directly or indirectly) reach the master works — usually the master-eligible list is configured
# NOTE(review): zen discovery settings are legacy in 7.x — confirm still honored.
discovery.zen.ping_timeout: 5s
 
bootstrap.memory_lock: true
action.destructive_requires_name: true


# master-eligible nodes
cluster.initial_master_nodes: ["es72_00-cluster", "es72_01-cluster", "es72_02-cluster"]
# es1.yml — config for node es72_01-cluster; node/host names here must match
# the service/container names in docker-compose.yaml.
cluster.name: elk-cluster
node.name: es72_01-cluster
node.master: true
node.data: true

network.host: es72_01-cluster
# Port note: ports on one machine cannot be shared. ES's default HTTP port is
# 9200; if taken, it increments within 9200-9300. The default transport (TCP)
# port is 9300, incrementing within 9300-9400. So a few nodes on a single
# machine can also run without explicit port settings.
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"


discovery.seed_hosts: ["es72_00-cluster:9300", "es72_01-cluster:9300", "es72_02-cluster:9300"] # used for cluster discovery; any set of hosts that can (directly or indirectly) reach the master works — usually the master-eligible list is configured
# NOTE(review): zen discovery settings are legacy in 7.x — confirm still honored.
discovery.zen.ping_timeout: 5s
 
bootstrap.memory_lock: true
action.destructive_requires_name: true
# es2.yml — config for node es72_02-cluster; node/host names here must match
# the service/container names in docker-compose.yaml.
cluster.name: elk-cluster
node.name: es72_02-cluster
# This node is data-only (not master-eligible), unlike es72_00/es72_01.
node.master: false
node.data: true
 
network.host: es72_02-cluster
# Port note: ports on one machine cannot be shared. ES's default HTTP port is
# 9200; if taken, it increments within 9200-9300. The default transport (TCP)
# port is 9300, incrementing within 9300-9400. So a few nodes on a single
# machine can also run without explicit port settings.
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
 

discovery.seed_hosts: ["es72_00-cluster:9300", "es72_01-cluster:9300", "es72_02-cluster:9300"] # used for cluster discovery; any set of hosts that can (directly or indirectly) reach the master works — usually the master-eligible list is configured
# NOTE(review): zen discovery settings are legacy in 7.x — confirm still honored.
discovery.zen.ping_timeout: 5s
 
bootstrap.memory_lock: true
action.destructive_requires_name: true

踩过的坑

上一篇 下一篇

猜你喜欢

热点阅读