Kafka Multi-Dimensional Deep Dive (7) - Kafka Consumer (1)

1 Consumer API

https://kafka.apache.org/documentation.html#consumerapi

  • Consumer
  • The consumption style shown below is not recommended for real work: the offset may be auto-committed before the business logic has finished processing, so if processing fails, that data can no longer be consumed next time.
package com.tzb.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

/**
 * @Description Consumer examples: auto-commit and manual offset commit
 * @Author tzb
 * @Date 2020/9/27 8:57
 * @Version 1.0
 **/
public class ConsumerSample {

    public final static String TOPIC_NAME = "tzb-new-topic";

    public static void main(String[] args) {
        helloworld();
    }

    private static void helloworld(){
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.10.103:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // subscribe to one or more topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            // poll for records at a fixed interval
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                        record.partition(),record.offset(), record.key(), record.value());
        }
    }

}


  • Producer
package com.tzb.kafka.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * @Description Producer examples: async send with a callback and a custom partitioner
 * @Author tzb
 * @Date 2020/9/23 18:49
 * @Version 1.0
 **/
public class ProducerSample {

    public final static String TOPIC_NAME = "tzb-new-topic";

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // async send
        //  producerSend();

        // producer synchronous (blocking) send
        //producerSyncSend();

        // async send with a callback
       // producerSendWithCallback();

        // async send with a callback and a custom partitioner
        producerSendWithCallbackAndPartition();

    }

    /**
     * Async producer send with a callback,
     * using a custom partitioner
     */
    public static void producerSendWithCallbackAndPartition() throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.103:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,"com.tzb.kafka.producer.SamplePartition");

        Producer<String, String> producer = new KafkaProducer<>(properties);

        // build and send the message records
        for (int i = 0; i < 100; i++) {
            String key = "key-" + i;
            ProducerRecord<String, String> record =
                    new ProducerRecord<String, String>(TOPIC_NAME, key, "value-" + i);
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception exception) {
                    System.out.println( " partition: " + recordMetadata.partition() + " , offset: " + recordMetadata.offset());
                }
            });
        }
        // every channel that is opened must be closed
        producer.close();

    }

}
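
The PARTITIONER_CLASS_CONFIG above points to com.tzb.kafka.producer.SamplePartition, whose source does not appear in this part of the post. A minimal sketch of what such a Partitioner might look like, assuming simple hash-by-key routing (the routing rule is an assumption, not necessarily the original implementation):

package com.tzb.kafka.producer;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;

/**
 * Sketch of a custom partitioner: route records with the same key
 * to the same partition by hashing the key.
 */
public class SamplePartition implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionCountForTopic(topic);
        // Math.abs is fine here except for Integer.MIN_VALUE; good enough for a demo
        return Math.abs(key.hashCode()) % numPartitions;
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}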


1.1 Consumer: Manual Offset Commit

    private static void committedOffset(){
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.10.103:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // subscribe to one or more topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            // poll for records at a fixed interval
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records){
                // e.g. persist the record to a database
                // TODO write record to DB
                System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                        record.partition(),record.offset(), record.key(), record.value());

                // on failure, roll back and do not commit the offset
            }

            // manually commit the offsets (asynchronously)
            consumer.commitAsync();

        }
    }
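
commitAsync() does not block and does not retry on failure. When a stronger guarantee is wanted, or when each partition should be committed as soon as its batch is processed, commitSync() can be called with an explicit offset map instead. A minimal sketch of that pattern (the method below is illustrative, not from the original post; it needs OffsetAndMetadata, TopicPartition, Collections and List imports in addition to the ones above):

    private static void committedOffsetPerPartition() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.10.103:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "false");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // commit per partition, so a failure on one partition does not
            // throw away the progress already made on the others
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                }
                // the committed offset must be the offset of the NEXT record to read
                long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(
                        partition, new OffsetAndMetadata(lastOffset + 1)));
            }
        }
    }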

1.2 Consumer Groups

  • Consumer notes
  • One Consumer can consume a single partition or several partitions, but one partition cannot be consumed by more than one Consumer within the same ConsumerGroup (see the rebalance-listener sketch below).
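
To watch this assignment rule in action, a ConsumerRebalanceListener can be passed to subscribe(). A minimal sketch, assuming it replaces the plain subscribe() call in the consumer code above: start two instances with the same group.id, and each will print a disjoint subset of the topic's partitions.

    // additional imports:
    // import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    // import org.apache.kafka.common.TopicPartition;
    // import java.util.Collection;
    consumer.subscribe(Arrays.asList(TOPIC_NAME), new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            System.out.println("revoked: " + partitions);
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // with two consumers in the same group, each sees a disjoint subset here
            System.out.println("assigned: " + partitions);
        }
    });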



  • One-to-one: one Consumer assigned to exactly one partition.