ClickHouse中文社区 部署

ClickHouse集群搭建从0到1

2017-12-13  本文已影响2428人  JackpGao

阅读此文,你将得到什么:

  1. ClickHouse安装的2种方法,以及背后的坑

  2. 一步步帮你实现ClickHouse从单机到集群化,以及集群化的原理、配置文件等

  3. 集群化的2种方案,孰优孰劣

如有疑问,请留言或者联系我

组件介绍

Docker安装

RPM包安装

配置文件解析

rpm启动方式

# 默认配置文件位置
root@localhost.localdomain:/  # ls /etc/clickhouse-server
config.xml  users.xml

# 上述文件定义了默认数据目录,临时目录位置,日志目录
/var/lib/clickhouse
/var/lib/clickhouse/tmp/
/var/log/clickhouse-server

# 默认启动脚本,注意,这个名字虽然叫server,其实是个shell脚本
/etc/rc.d/init.d/clickhouse-server
root@localhost.localdomain:/  # file /etc/rc.d/init.d/clickhouse-server 
/etc/rc.d/init.d/clickhouse-server: POSIX shell script, ASCII text executable, with very long lines


# 最大文件打开数
root@localhost.localdomain:/  # cat /etc/security/limits.d/clickhouse.conf 
clickhouse      soft    nofile  262144
clickhouse      hard    nofile  262144

# 默认crontab目录(没啥用)
/etc/cron.d/clickhouse-server


# 剩下就是/usr/bin下的二进制文件,但其实都是软链接到了clickhouse这个二进制文件
root@localhost.localdomain:/usr/bin  # ll | grep click -i
-rwxr-xr-x    1 root root      63M Sep 20 16:58 clickhouse
lrwxrwxrwx    1 root root       10 Dec 11 17:14 clickhouse-client -> clickhouse
-rwxr-xr-x    1 root root     3.3M Sep 20 16:58 clickhouse-compressor
lrwxrwxrwx    1 root root       10 Dec 11 17:14 clickhouse-server -> clickhouse
root@localhost.localdomain:/data1/clickhouse  # tree . -L 1
.
├── config-preprocessed.xml
├── config.xml
├── cores
├── data
├── flags
├── log
├── metadata
├── metrika.xml
├── start_ck.sh
├── status
├── tmp
├── users-preprocessed.xml
└── users.xml
- 这里其实是可以直接使用clickhouse-server(二进制那个),并采用-d参数启动的,但是实际过程,遇到了很多意外的情况,比如-d后,并不会以daemon方式启动,后来就不考虑直接命令行方式了
- 修改config.xml里对数据目录的定义
<?xml version="1.0"?>
<yandex>
   <!-- Logging: trace level, log file locations, rotation size and retained file count -->
   <logger>
       <level>trace</level>
       <log>/data1/clickhouse/log/server.log</log>
       <errorlog>/data1/clickhouse/log/error.log</errorlog>
       <size>1000M</size>
       <count>10</count>
   </logger>

   <!-- Ports: HTTP interface, native TCP protocol, inter-server (replication) HTTP -->
   <http_port>8123</http_port>
   <tcp_port>9000</tcp_port>
   <interserver_http_port>9009</interserver_http_port>

   <!-- This node's own hostname; must be a domain name if replication is used later -->
   <interserver_http_host>这里需要用域名,如果后续用到复制的话</interserver_http_host>

   <!-- Listen on all IPv4 interfaces -->
   <listen_host>0.0.0.0</listen_host>
   <!-- Maximum number of simultaneous client connections -->
   <max_connections>64</max_connections>

   <!-- HTTP keep-alive timeout in seconds (original author noted being unsure of this parameter) -->
   <keep_alive_timeout>3</keep_alive_timeout>

   <!-- Maximum number of concurrently executing queries -->
   <max_concurrent_queries>16</max_concurrent_queries>

   <!-- Cache sizes, in bytes (8 GiB uncompressed-block cache, 10 GiB mark cache) -->
   <uncompressed_cache_size>8589934592</uncompressed_cache_size>
   <mark_cache_size>10737418240</mark_cache_size>

   <!-- Data directory and temporary-file directory -->
   <path>/data1/clickhouse/</path>
   <tmp_path>/data1/clickhouse/tmp/</tmp_path>

   <!-- User settings file (path relative to this config) and default settings profile -->
   <users_config>users.xml</users_config>
   <default_profile>default</default_profile>

   <log_queries>1</log_queries>

   <default_database>default</default_database>

   <!-- These three sections are substituted from the include file referenced below -->
   <remote_servers incl="clickhouse_remote_servers" />
   <zookeeper incl="zookeeper-servers" optional="true" />
   <macros incl="macros" optional="true" />

   <!-- Reload interval for built-in dictionaries, in seconds
        (original author noted being unsure of this parameter) -->
   <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>

   <!-- Size guard on dropping large tables; 0 presumably disables the limit —
        NOTE(review): confirm against the ClickHouse server settings reference -->
   <max_table_size_to_drop>0</max_table_size_to_drop>

   <include_from>/data1/clickhouse/metrika.xml</include_from>
</yandex>

单机

分布式集群

CK是如何实现分布式的

<yandex>

<!-- Cluster layout: three shards, one replica each (no replication between replicas) -->
<clickhouse_remote_servers>
    <bip_ck_cluster>

        <!-- Shard 1 -->
        <shard>
            <internal_replication>false</internal_replication>
            <replica>
                <host>ck31.xxxx.com.cn</host>
                <port>9000</port>
                <user>default</user>
                <password>6lYaUiFi</password>
            </replica>
        </shard>

        <!-- Shard 2 -->
        <shard>
            <internal_replication>false</internal_replication>
            <replica>
                <host>ck32.xxxx.sina.com.cn</host>
                <port>9000</port>
                <user>default</user>
                <password>6lYaUiFi</password>
            </replica>
        </shard>

        <!-- Shard 3 -->
        <shard>
            <internal_replication>false</internal_replication>
            <replica>
                <host>ck33.xxxxa.com.cn</host>
                <port>9000</port>
                <user>default</user>
                <password>6lYaUiFi</password>
            </replica>
        </shard>

    </bip_ck_cluster>
</clickhouse_remote_servers>

<!-- Replica name macro for this node (original note: unused in this non-replicated setup) -->
<macros>
    <replica>ck1</replica>
</macros>

<!-- Allowed client networks, substituted into users.xml via incl="networks"
     (original note: seems redundant) -->
<networks>
   <ip>::/0</ip>
</networks>

<!-- ZooKeeper ensemble (three nodes) -->
<zookeeper-servers>
  <node index="1">
    <host>1.xxxx.sina.com.cn</host>
    <port>2181</port>
  </node>
  <node index="2">
    <host>2.xxxx.sina.com.cn</host>
    <port>2181</port>
  </node>
  <node index="3">
    <host>3.xxxxp.sina.com.cn</host>
    <port>2181</port>
  </node>
</zookeeper-servers>

<!-- Compression method selection: LZ4 for parts matching these size thresholds -->
<clickhouse_compression>
<case>
  <min_part_size>10000000000</min_part_size>
  <min_part_size_ratio>0.01</min_part_size_ratio>
  <method>lz4</method>
</case>
</clickhouse_compression>

</yandex>
<?xml version="1.0"?>
<yandex>
    <profiles>
        <!-- Profile for read-write users -->
        <default>
            <max_memory_usage>10000000000</max_memory_usage>
            <use_uncompressed_cache>0</use_uncompressed_cache>
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile for read-only users (original comment said "write-only" — a typo;
             this profile sets readonly=1) -->
        <readonly>
            <max_memory_usage>10000000000</max_memory_usage>
            <use_uncompressed_cache>0</use_uncompressed_cache>
            <load_balancing>random</load_balancing>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Quotas: all counters set to 0 within each 3600-second interval
         (0 appears to mean unlimited — confirm against ClickHouse quota docs) -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <interval>
                <duration>3600</duration>
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>

    <users>
        <!-- Read-write user -->
        <default>
            <password_sha256_hex>967f3bf355dddfabfca1c9f5cab39352b2ec1cd0b05f9e1e6b8f629705fe7d6e</password_sha256_hex>
            <!-- Allowed networks pulled from metrika.xml via incl="networks" -->
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
        </default>

        <!-- Read-only user -->
        <ck>
            <password_sha256_hex>967f3bf355dddfabfca1c9f5cab39352b2ec1cd0b05f9e1e6b8f629705fe7d6e</password_sha256_hex>
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>
            <profile>readonly</profile>
            <quota>default</quota>
        </ck>
    </users>
</yandex>

简单分布式方案

CREATE TABLE db.tb (date Date, ……) ENGINE = MergeTree(date, (date, hour, datetime), 8192)

CREATE TABLE db.tb_all (date Date, ……) ENGINE = Distributed(bip_ck_cluster, 'ck_test', 'dagger', rand())

分布式+高可用方案1

分布式+高可用方案2

CREATE TABLE db.tb (date Date, ……) ENGINE = ReplicatedMergeTree('/clickhouse/db/tb/name', 'node_name', date, (date, hour, datetime), 8192)


CREATE TABLE db.tb_all (date Date, ……) ENGINE = Distributed(bip_ck_cluster, 'ck_test', 'dagger', rand())

CK分布式的问题

总结

上一篇下一篇

猜你喜欢

热点阅读