Using Java Code to Work with Kafka

2023-02-19  CoderInsight

Overview

This post walks through the Kafka Java client: the Maven setup, producer examples (fire-and-forget, callback-based, and a custom partitioner), consumers with automatic and manual offset commits, and consuming from explicitly assigned partitions.

Detailed Walkthrough

(0). Maven Configuration

Add the Kafka client dependency and build plugins to the project's pom.xml. The kafka-clients artifact should match the version of the kafka_2.11 artifact, so that two different client versions do not end up on the classpath:

```xml
    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.1.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>1.1.0</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass></mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
```

(1). Producer Example

```java
package top.wangyq.producer;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;

public class KafkaProducerStudy {
    /**
     * Produce data into Kafka through the Java API.
     * @param args
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // Message acknowledgement mechanism
        props.put("acks", "all");
        // Number of retries when a send fails
        props.put("retries", 0);
        // Producer buffer size (default 32 MB)
        props.put("buffer.memory", 33554432);
        // Batch size: how much data is written to the topic per batch (default 16 KB)
        props.put("batch.size", 16384);
        // How long to wait before sending a batch (default 0: send immediately)
        props.put("linger.ms", 1);
        // Key and value serializers
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);

        for (int i = 0; i < 10; i++) {

            // With neither a partition number nor a key, records are distributed
            // across the partitions in round-robin fashion
            ProducerRecord<String, String> record = new ProducerRecord<String, String>("demo01", "helloworld" + i);
            // Option 1: fire-and-forget send
            producer.send(record);

            // The overloads of send() support different production styles.
            // Option 2: pass an anonymous Callback and override its method
//            producer.send(record, new Callback() {
//                @Override
//                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
//                    System.out.println("Option 2: for simple post-send logic, an anonymous inner class is enough");
//                    System.out.println(recordMetadata.topic());
//                    System.out.println(recordMetadata.partition());
//                    System.out.println(recordMetadata.offset());
//                }
//            });

            // Option 3: for more complex logic, a named callback class is easier to maintain
//            producer.send(record, new MyCallback(record));
        }
        // Close the producer client
        producer.close();
    }
}
```

The named callback used in option 3 is a separate class in the same package:

```java
package top.wangyq.producer;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Custom callback; it must implement Kafka's Callback interface.
 **/
public class MyCallback implements Callback {
    private Object msg;

    public MyCallback(Object msg) {
        this.msg = msg;
    }

    /**
     * RecordMetadata carries the metadata of the record that was sent
     * (topic, partition, offset).
     */
    @Override
    public void onCompletion(RecordMetadata metadata, Exception e) {
        System.out.println("topic = " + metadata.topic());
        System.out.println("partition = " + metadata.partition());
        System.out.println("offset = " + metadata.offset());
        System.out.println(msg);
    }
}
```
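
To verify the producer end to end, one option (assuming the `demo01` topic already exists on the brokers above) is to read the topic back with the console consumer that ships with Kafka: `bin/kafka-console-consumer.sh --bootstrap-server node01:9092 --topic demo01 --from-beginning`.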

A second producer demo registers a custom partitioner and contrasts asynchronous and synchronous sends:

```java
package com.kaikeba.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * Requirement: develop Kafka producer code.
 */
public class KafkaProducerStudyDemo {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // Prepare configuration properties
        Properties props = new Properties();
        // Kafka cluster address
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // acks is the message acknowledgement mechanism: 0, 1, or -1/all
        props.put("acks", "all");
        // Number of retries
        props.put("retries", 0);
        // Batch size: how much data is written to the topic per batch
        props.put("batch.size", 16384);
        // How long to wait before sending a batch
        props.put("linger.ms", 1);
        // Producer buffer size
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // Register a custom partitioner
        props.put("partitioner.class", "com.kaikeba.partitioner.MyPartitioner");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 100; i++) {

            // Asynchronous send
            producer.send(new ProducerRecord<String, String>("test", Integer.toString(i), "hello-kafka-" + i), new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("Message sent successfully");
                    } else {
                        // The send failed; the message needs to be resent
                    }
                }
            });

            // Synchronous send: get() blocks until the broker acknowledges,
            // so the call only returns once the response has come back.
            // producer.send(new ProducerRecord<String, String>("test", Integer.toString(i), "hello-kafka-" + i)).get();
        }
        producer.close();
    }
}
```
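
The demo registers `com.kaikeba.partitioner.MyPartitioner`, but the original post does not show that class. Below is a minimal sketch of what such a class can look like against the `org.apache.kafka.clients.producer.Partitioner` interface; the hashing strategy is my own assumption, not the author's implementation:

```java
package com.kaikeba.partitioner;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical implementation: key-hash partitioning, with a round-robin
// fallback for records that carry no key.
public class MyPartitioner implements Partitioner {

    private final AtomicInteger counter = new AtomicInteger(0);

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if (keyBytes == null) {
            // Keyless record: spread round-robin (mask keeps the index non-negative)
            return (counter.getAndIncrement() & 0x7fffffff) % numPartitions;
        }
        // Keyed record: the same key always lands in the same partition
        return (key.hashCode() & 0x7fffffff) % numPartitions;
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}
```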

(2). Consuming with Automatic Offset Commit

Data within each partition is read and written sequentially, so a consumer sees a partition's records in offset order (the sketch after the example below makes this visible).

```java
package top.wangyq.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

// Kafka consumer (automatic offset commit)
public class KafkaConsumerStudy {
    public static void main(String[] args) {
        // Prepare configuration properties
        Properties props = new Properties();
        // Kafka cluster address
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // Consumer group id
        props.put("group.id", "consumer-test");
        // Commit offsets automatically
        props.put("enable.auto.commit", "true");
        // Interval between automatic offset commits
        props.put("auto.commit.interval.ms", "1000");
        // auto.offset.reset (default: latest)
        // earliest: if a partition has a committed offset, consume from it; otherwise consume from the beginning
        // latest:   if a partition has a committed offset, consume from it; otherwise consume only newly produced data
        // none:     if every partition has a committed offset, consume from those offsets; if any partition lacks one, throw an exception
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        // Topics to consume
        consumer.subscribe(Arrays.asList("demo01"));
        while (true) {
            // Keep polling for data
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                // Partition this record came from
                int partition = record.partition();
                // The record's key
                String key = record.key();
                // The record's offset
                long offset = record.offset();
                // The record's value
                String value = record.value();
                System.out.println("partition:" + partition + "\t key:" + key + "\toffset:" + offset + "\tvalue:" + value);
            }
        }
    }
}
```
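
To observe the per-partition ordering mentioned at the top of this section, the poll results can be iterated partition by partition. A sketch (the class name and group id are illustrative; the configuration mirrors the example above):

```java
package top.wangyq.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Arrays;
import java.util.Properties;

// Sketch: iterate each poll result partition by partition to see that
// offsets within a single partition arrive strictly in order.
public class PerPartitionOrderDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("group.id", "order-demo"); // hypothetical group id
        props.put("enable.auto.commit", "true");
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("demo01"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            // partitions() lists the partitions present in this batch;
            // records(tp) returns that partition's records in offset order
            for (TopicPartition tp : records.partitions()) {
                for (ConsumerRecord<String, String> record : records.records(tp)) {
                    System.out.println("partition " + tp.partition() + " -> offset " + record.offset());
                }
            }
        }
    }
}
```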

(3). Consuming with Manual Offset Commit

```java
package top.wangyq.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

// Kafka consumer (manual offset commit)
public class KafkaConsumerControllerOffset {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("group.id", "controllerOffset");
        // Turn off auto-commit; offsets will be committed manually
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        // Topics to consume; separate multiple topics with commas
        consumer.subscribe(Arrays.asList("demo01"));

        // Threshold: how many records to accumulate before committing offsets manually
        final int minBatchSize = 20;

        // Buffer for a batch of records: polled records are collected into this list,
        // and the offsets are committed manually once the batch has been processed
        List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
        // Poll in an endless loop. Even with multiple partitions and no explicit
        // partition assignment, this single loop means one thread consumes them all.
        while (true) {
            // Keep polling; the argument is how long to block waiting for data
            ConsumerRecords<String, String> records = consumer.poll(100);

            // Move the polled records into the buffer
            for (ConsumerRecord<String, String> record : records) {
                buffer.add(record);
            }
            // Only commit manually once the buffer reaches the threshold
            if (buffer.size() >= minBatchSize) {
                //insertIntoDb(buffer);  process the data here, e.g. write it to a database
                System.out.println("Records in buffer: " + buffer.size());
                System.out.println("Finished processing this batch...");
                // Option 1: synchronous manual commit
                consumer.commitSync();
                // Option 2: asynchronous manual commit
                // consumer.commitAsync();

                // Clear the buffer after committing
                buffer.clear();
            }
        }
    }
}
```
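
If the asynchronous variant is used, `commitAsync` can also take an `OffsetCommitCallback`, so commit failures are at least visible. A sketch of a drop-in replacement for the `consumer.commitSync()` call above (the error handling is illustrative; it needs these extra imports):

```java
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

// In place of consumer.commitSync() inside the batch-processing block:
consumer.commitAsync(new OffsetCommitCallback() {
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception == null) {
            // 'offsets' maps each partition to the position that was just committed
            System.out.println("Committed: " + offsets);
        } else {
            // The commit failed; log it. Blindly retrying here can commit
            // an older offset after a newer one, so prefer logging/alerting.
            exception.printStackTrace();
        }
    }
});
```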

(4). Consuming from Specified Partitions

```java
package top.wangyq.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Arrays;
import java.util.Properties;

public class ConsumPartition {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("group.id", "mydemo");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        String topic = "demo01";
        // Manually assign the specific partitions to consume
        // (assign() replaces subscribe(); no consumer-group rebalancing is involved)
        TopicPartition partition0 = new TopicPartition(topic, 0);
        TopicPartition partition1 = new TopicPartition(topic, 1);
        consumer.assign(Arrays.asList(partition0, partition1));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
}
```
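
Manual assignment pairs naturally with seeking: after `assign()`, the consumer can be positioned at an explicit offset with `seek()`, or at the ends of the assigned partitions. A sketch reusing `partition0` and `partition1` from above (the offset value is arbitrary, for illustration):

```java
// After consumer.assign(...), position the consumer explicitly.
consumer.seek(partition0, 42L); // 42 is an arbitrary example offset

// Or jump to the beginning / end of the assigned partitions:
// consumer.seekToBeginning(Arrays.asList(partition0, partition1));
// consumer.seekToEnd(Arrays.asList(partition0, partition1));
```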