Kafka 多维度系统精讲(8)- kafka consumer(2)

1 Consumer 单 Partition 提交 offset

  • bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tzb-new-topic
  • bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic tzb-new-topic --partitions 2

  • 生产者
public class ProducerSample {

    public final static String TOPIC_NAME = "tzb-new-topic";

    public static void main(String[] args) throws ExecutionException, InterruptedException {

        // 自定义分区
        producerSendWithCallbackAndPartition();

    }

    /**
     * producer 异步发送带回调函数
     * 自定义分区
     */
    public static void producerSendWithCallbackAndPartition() throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.103:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "0");
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
        properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,"com.tzb.kafka.producer.SamplePartition");

        Producer<String, String> producer = new KafkaProducer<>(properties);

        // 消息对象
        for (int i = 0; i < 100; i++) {
            String key = "key-" + i;
            ProducerRecord<String, String> record =
                    new ProducerRecord<String, String>(TOPIC_NAME, key, "value-" + i);
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception exception) {
                    System.out.println( " partition: " + recordMetadata.partition() + " , offset: " + recordMetadata.offset());
                }
            });
        }
        // 所有的通道打开都需要关闭
        producer.close();

    }

  • 消费者
public class ConsumerSample {

    public final static String TOPIC_NAME = "tzb-new-topic";

    public static void main(String[] args) {
        //helloworld();

        // 手动对每个 partition 进行提交
        commitedOffsetWithPartition();
    }


    // 手动提交 offset,并且手动控制 partition
    private static void commitedOffsetWithPartition() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.10.103:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // 订阅一个或者多个 topic
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            // 定时间隔去拉取
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));

            // 每个 partition 单独处理
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                }

                long lastOffset = pRecord.get(pRecord.size() - 1).offset();

                // 单个 partition 中的 offset,并且进行提交
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
                // 提交 offset
                consumer.commitSync(offset);
                System.out.println("===========  partition - " + partition +"====================");
            }


        }
    }

2 Consumer 手动控制一到多个分区

public class ConsumerSample {

    public final static String TOPIC_NAME = "tzb-new-topic";

    public static void main(String[] args) {
        //helloworld();

        // 手动对每个 partition 进行提交
        //commitedOffsetWithPartition();

        // 手动订阅某个或者某些分区,并提交offset
        commitedOffsetWithPartition2();

    }

    // 手动提交 offset,并且手动控制 partition,更加高级
    private static void commitedOffsetWithPartition2() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "192.168.10.103:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // tzb-new-topic, 0,1 两个 partition
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);

        // 订阅一个或者多个 topic
//        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        // 消费订阅某个 topic 的某个分区
        consumer.assign(Arrays.asList(p0));


        while (true) {
            // 定时间隔去拉取
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));

            // 每个 partition 单独处理
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                }

                long lastOffset = pRecord.get(pRecord.size() - 1).offset();

                // 单个 partition 中的 offset,并且进行提交
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
                // 提交 offset
                consumer.commitSync(offset);
                System.out.println("===========  partition - " + partition +"====================");
            }
            
        }
    }

(图片占位符:原文此处有一张运行结果截图,提取时未能保留。)

已标记关键词 清除标记
©️2020 CSDN 皮肤主题: 书香水墨 设计师:CSDN官方博客 返回首页