1 Consumer 单 Partition 提交 offset（commit offsets per individual partition）
# Create the demo topic with replication factor 1 and a single partition.
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tzb-new-topic
# Grow the topic to 2 partitions (Kafka only allows increasing the partition count).
bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic tzb-new-topic --partitions 2
public class ProducerSample {
public final static String TOPIC_NAME = "tzb-new-topic";
public static void main(String[] args) throws ExecutionException, InterruptedException {
producerSendWithCallbackAndPartition();
}
public static void producerSendWithCallbackAndPartition() throws ExecutionException, InterruptedException {
Properties properties = new Properties();
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.103:9092");
properties.put(ProducerConfig.ACKS_CONFIG, "all");
properties.put(ProducerConfig.RETRIES_CONFIG, "0");
properties.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
properties.put(ProducerConfig.LINGER_MS_CONFIG, "1");
properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,"com.tzb.kafka.producer.SamplePartition");
Producer<String, String> producer = new KafkaProducer<>(properties);
for (int i = 0; i < 100; i++) {
String key = "key-" + i;
ProducerRecord<String, String> record =
new ProducerRecord<String, String>(TOPIC_NAME, key, "value-" + i);
producer.send(record, new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception exception) {
System.out.println( " partition: " + recordMetadata.partition() + " , offset: " + recordMetadata.offset());
}
});
}
producer.close();
}
public class ConsumerSample {
public final static String TOPIC_NAME = "tzb-new-topic";
public static void main(String[] args) {
commitedOffsetWithPartition();
}
private static void commitedOffsetWithPartition() {
Properties props = new Properties();
props.setProperty("bootstrap.servers", "192.168.10.103:9092");
props.setProperty("group.id", "test");
props.setProperty("enable.auto.commit", "true");
props.setProperty("auto.commit.interval.ms", "1000");
props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList(TOPIC_NAME));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
for (TopicPartition partition : records.partitions()) {
List<ConsumerRecord<String, String>> pRecord = records.records(partition);
for (ConsumerRecord<String, String> record : pRecord) {
System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
record.partition(), record.offset(), record.key(), record.value());
}
long lastOffset = pRecord.get(pRecord.size() - 1).offset();
Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
consumer.commitSync(offset);
System.out.println("=========== partition - " + partition +"====================");
}
}
}
2 Consumer 手动控制一到多个分区（manually assign one or more specific partitions）
public class ConsumerSample {
public final static String TOPIC_NAME = "tzb-new-topic";
public static void main(String[] args) {
commitedOffsetWithPartition2();
}
private static void commitedOffsetWithPartition2() {
Properties props = new Properties();
props.setProperty("bootstrap.servers", "192.168.10.103:9092");
props.setProperty("group.id", "test");
props.setProperty("enable.auto.commit", "true");
props.setProperty("auto.commit.interval.ms", "1000");
props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
consumer.assign(Arrays.asList(p0));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
for (TopicPartition partition : records.partitions()) {
List<ConsumerRecord<String, String>> pRecord = records.records(partition);
for (ConsumerRecord<String, String> record : pRecord) {
System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
record.partition(), record.offset(), record.key(), record.value());
}
long lastOffset = pRecord.get(pRecord.size() - 1).offset();
Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
consumer.commitSync(offset);
System.out.println("=========== partition - " + partition +"====================");
}
}
}
