[Kafka 6] Kafka Producer and Consumer with Multiple Brokers and Multiple Partitions


0. Kafka Server Configuration

3 brokers

1 topic with 6 partitions and a replication factor of 2

2 consumers, each reading with 3 concurrent threads
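The topic is normally created up front with the kafka-topics.sh script, but it can also be created programmatically. Below is a minimal sketch, assuming a Kafka 0.8.1+ client and zkclient on the classpath and the ZooKeeper address used elsewhere in this post; the class and package names are just for illustration.

package kafka.examples.multibrokers.admin;

import java.util.Properties;

import org.I0Itec.zkclient.ZkClient;

import kafka.admin.AdminUtils;
import kafka.utils.ZKStringSerializer$;

public class CreateLearnTopic {
    public static void main(String[] args) {
        // Connect to the ZooKeeper ensemble the brokers are registered with
        ZkClient zkClient = new ZkClient("192.168.26.140:2181", 10000, 10000, ZKStringSerializer$.MODULE$);
        // Topic name, 6 partitions, replication factor 2, no per-topic config overrides
        AdminUtils.createTopic(zkClient, "learn.topic.p8.r2", 6, 2, new Properties());
        zkClient.close();
    }
}

Once created, the 6 partitions are spread across the 3 brokers, and each partition is replicated on 2 of them.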

 

1. Producer

package kafka.examples.multibrokers.producers;

import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MultiBrokerProducer {

    private static Producer<String, String> producer;
    private static Properties props = new Properties();

    static {
        // All three brokers run on the same host, on ports 9092/9093/9094
        props.put("metadata.broker.list", "192.168.26.140:9092,192.168.26.140:9093,192.168.26.140:9094");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Custom partitioner (see section 2) decides which partition each key goes to
        props.put("partitioner.class", "kafka.examples.multibrokers.partitioner.TopicPartitioner");
        // Wait for the partition leader to acknowledge each message
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);
        producer = new Producer<String, String>(config);
    }

    public static void main(String[] args) {
        Random rnd = new Random();
        String topic = "learn.topic.p8.r2";
        for (long i = 0; i < 10000; i++) {
            String key = "" + rnd.nextInt(255);
            String msg = "The " + i + " message is for key - " + key;
            KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, key, msg);
            producer.send(data);
            System.out.println(i);
        }
        producer.close();
    }
}
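A possible variation: besides send(KeyedMessage), kafka.javaapi.producer.Producer also exposes send(List<KeyedMessage>), so the loop above can hand messages to the producer in batches. A minimal sketch of the loop body under that variant (the batch size of 100 is an arbitrary choice; java.util.ArrayList and java.util.List are needed in addition to the imports above):

// Inside main(), reusing the producer, rnd and topic defined above:
List<KeyedMessage<String, String>> batch = new ArrayList<KeyedMessage<String, String>>();
for (long i = 0; i < 10000; i++) {
    String key = "" + rnd.nextInt(255);
    batch.add(new KeyedMessage<String, String>(topic, key, "The " + i + " message is for key - " + key));
    if (batch.size() == 100) {
        producer.send(batch); // send(List<KeyedMessage>) hands over the whole batch in one call
        batch.clear();
    }
}
if (!batch.isEmpty()) {
    producer.send(batch); // flush the remainder
}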

 

 

2. Partitioner

package kafka.examples.multibrokers.partitioner;

import java.util.Random;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class TopicPartitioner implements Partitioner {

    public TopicPartitioner(VerifiableProperties props) {
        // A constructor taking VerifiableProperties is required by the producer
    }

    @Override
    public int partition(Object key, int numPartitions) {
        int hashCode;
        if (key == null) {
            // No key: pick a pseudo-random bucket
            hashCode = new Random().nextInt(255);
        } else {
            hashCode = key.hashCode();
        }
        if (numPartitions <= 0) {
            return 0;
        }
        // Math.abs guards against negative hash codes producing a negative partition id
        return Math.abs(hashCode % numPartitions);
    }
}
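To see how the partitioner spreads keys across the 6 partitions of the topic, it can be exercised standalone. A minimal sketch, where the class name TopicPartitionerDemo and the sample keys are just for illustration:

package kafka.examples.multibrokers.partitioner;

import kafka.utils.VerifiableProperties;

public class TopicPartitionerDemo {
    public static void main(String[] args) {
        TopicPartitioner partitioner = new TopicPartitioner(new VerifiableProperties());
        int numPartitions = 6; // matches the topic configuration above
        for (String key : new String[]{"0", "1", "42", "255"}) {
            System.out.println("key " + key + " -> partition "
                    + partitioner.partition(key, numPartitions));
        }
    }
}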

 

3. Consumer

package kafka.examples.multibrokers.consumers;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class MultiThreadHLConsumer {

    private ExecutorService executor;
    private final ConsumerConnector consumer;
    private final String topic;

    public MultiThreadHLConsumer(String zookeeper, String groupId, String topic) {
        Properties props = new Properties();
        props.put("zookeeper.connect", zookeeper);
        props.put("group.id", groupId);
        props.put("zookeeper.session.timeout.ms", "500");
        props.put("zookeeper.sync.time.ms", "250");
        props.put("auto.commit.interval.ms", "1000");
        consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        this.topic = topic;
    }

    public void doConsume(int threadCount) {
        // Define the number of streams (threads) for each topic;
        // a single topic is used here, but more topics can be added to the map
        Map<String, Integer> topicCount = new HashMap<String, Integer>();
        topicCount.put(topic, new Integer(threadCount));

        Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams = consumer.createMessageStreams(topicCount);
        List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(topic);
        System.out.println("streams length: " + streams.size());

        // Launch the thread pool, one worker per stream
        executor = Executors.newFixedThreadPool(threadCount);

        // Wait until every worker thread has finished consuming
        final CountDownLatch latch = new CountDownLatch(threadCount);
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new Runnable() {
                @Override
                public void run() {
                    ConsumerIterator<byte[], byte[]> consumerIte = stream.iterator();
                    while (consumerIte.hasNext()) {
                        System.out.println("Message from thread :: " + Thread.currentThread().getName()
                                + " -- " + new String(consumerIte.next().message()));
                    }
                    latch.countDown();
                }
            });
        }

        try {
            latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        if (consumer != null) {
            consumer.shutdown();
        }
        if (executor != null) {
            executor.shutdown();
        }
    }

    public static void main(String[] args) {
        String topic = "learn.topic.p8.r2";
        int threadCount = 3;
        MultiThreadHLConsumer simpleHLConsumer = new MultiThreadHLConsumer(
                "192.168.26.140:2181", "learn.topic.p8.r2.consumers.group", topic);
        simpleHLConsumer.doConsume(threadCount);
    }
}
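One thing to be aware of: with the default settings, ConsumerIterator.hasNext() blocks until a new message arrives, so the worker threads never reach latch.countDown() and doConsume() only returns when the process is killed. If the demo should shut down cleanly once the topic is drained, a hedged variant is to set consumer.timeout.ms in the constructor and catch kafka.consumer.ConsumerTimeoutException in each worker; the 10-second value below is an arbitrary choice for illustration:

// 1) In the constructor, let the iterator give up after 10s without a message:
//        props.put("consumer.timeout.ms", "10000");
//
// 2) In each worker's run(), treat the timeout as "nothing left to read":
ConsumerIterator<byte[], byte[]> consumerIte = stream.iterator();
try {
    while (consumerIte.hasNext()) {
        System.out.println("Message from thread :: " + Thread.currentThread().getName()
                + " -- " + new String(consumerIte.next().message()));
    }
} catch (ConsumerTimeoutException e) {
    // No message arrived within consumer.timeout.ms; stop this worker thread
} finally {
    latch.countDown();
}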

 

 

4. Issues to Note

 
