The previous article, 《kafka分布式消息队列介绍以及集群安装》 (an introduction to the Kafka distributed message queue and cluster installation), gave an initial look at Kafka. This article focuses on the operations most commonly performed from Java code.

Preparation: add the Kafka dependency to the pom:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.2.0</version>
</dependency>
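Note that the classes used below (kafka.admin.TopicCommand, kafka.javaapi.producer.Producer, kafka.javaapi.consumer.ConsumerConnector) belong to the old Scala client that ships in the Kafka core jar, not in kafka-clients, so the core artifact also has to be on the classpath. A sketch of that dependency, assuming the Scala 2.11 build of the same version:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.2.0</version>
</dependency>

The kafka-clients artifact above is still useful: the newer producer/consumer API shown later in this article comes from it.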

1. Topic operations in Kafka

package org.kafka;

import kafka.admin.TopicCommand;

/**
 * Kafka topic operations
 */
public class TopicDemo {

    /**
     * Create a topic
     * Shell equivalent: bin/kafka-topics.sh --create --zookeeper 192.168.2.100:2181 --replication-factor 3 --partitions 1 --topic topictest0416
     */
    public static void createTopic() {
        String[] options = new String[] {
                "--create",
                "--zookeeper", "192.168.2.100:2181",
                "--replication-factor", "3",
                "--partitions", "1",
                "--topic", "topictest0416" };
        TopicCommand.main(options);
    }

    /**
     * List all topics
     * Shell equivalent: bin/kafka-topics.sh --list --zookeeper 192.168.2.100:2181
     */
    public static void queryTopic() {
        String[] options = new String[] {
                "--list",
                "--zookeeper", "192.168.2.100:2181" };
        TopicCommand.main(options);
    }

    /**
     * Describe the partition and replica status of a given topic
     * Shell equivalent: bin/kafka-topics.sh --describe --zookeeper 192.168.2.100:2181 --topic topictest0416
     */
    public static void queryTopicByName() {
        String[] options = new String[] {
                "--describe",
                "--zookeeper", "192.168.2.100:2181",
                "--topic", "topictest0416" };
        TopicCommand.main(options);
    }

    /**
     * Alter a topic (increase its partition count)
     * Shell equivalent: bin/kafka-topics.sh --alter --zookeeper 192.168.2.100:2181 --topic topictest0416 --partitions 3
     */
    public static void alterTopic() {
        String[] options = new String[] {
                "--alter",
                "--zookeeper", "192.168.2.100:2181",
                "--topic", "topictest0416",
                "--partitions", "3" };
        TopicCommand.main(options);
    }

    /**
     * Delete a topic
     * Shell equivalent: bin/kafka-topics.sh --delete --zookeeper 192.168.2.100:2181 --topic topictest0416
     * Note: kafka.admin.DeleteTopicCommand from older releases is not available in the 0.10.x jars;
     * deletion goes through TopicCommand with --delete, and the broker needs delete.topic.enable=true
     * for the topic to actually be removed rather than just marked for deletion.
     */
    public static void delTopic() {
        String[] options = new String[] {
                "--delete",
                "--zookeeper", "192.168.2.100:2181",
                "--topic", "topictest0416" };
        TopicCommand.main(options);
    }
}
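These methods can be exercised from a small driver; below is a minimal sketch (the TopicDemoRunner class is illustrative and not part of the original code). TopicCommand.main may call System.exit when it finishes, so it is safest to run one topic operation per JVM invocation:

package org.kafka;

public class TopicDemoRunner {
    public static void main(String[] args) {
        // Run exactly one topic operation per invocation,
        // since TopicCommand.main may terminate the JVM when it returns.
        TopicDemo.createTopic();
    }
}

The other methods (queryTopic, queryTopicByName, alterTopic, delTopic) can be run the same way.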

2. Producer code

package org.kafka;

import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class ProducerDemo {

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        // ZooKeeper ensemble
        props.put("zk.connect", "hadoop1-1:2181,hadoop1-2:2181,hadoop1-3:2181");
        // Broker list used to fetch topic metadata
        props.put("metadata.broker.list", "hadoop1-1:9092,hadoop1-2:9092,hadoop1-3:9092");
        // Class used to serialize the messages
        props.put("serializer.class", "kafka.serializer.StringEncoder");

        ProducerConfig config = new ProducerConfig(props);
        // Build the producer
        Producer<String, String> producer = new Producer<String, String>(config);

        // Send business messages; in practice these might be read from a file or an in-memory database
        for (int i = 0; i < 10; i++) {
            Thread.sleep(500);
            KeyedMessage<String, String> km = new KeyedMessage<String, String>(
                    "topictest0416", "I am a producer " + i + " hello!");
            producer.send(km);
        }
    }
}
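For comparison, the kafka-clients dependency declared above already contains the newer producer API (org.apache.kafka.clients.producer). Below is a minimal sketch of the same send loop against that API, reusing the broker list and topic from ProducerDemo; the class name NewProducerDemo is illustrative:

package org.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class NewProducerDemo {

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        // Broker list; the new producer does not need ZooKeeper
        props.put("bootstrap.servers", "hadoop1-1:9092,hadoop1-2:9092,hadoop1-3:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 10; i++) {
            Thread.sleep(500);
            producer.send(new ProducerRecord<>("topictest0416", "I am a producer " + i + " hello!"));
        }
        // Flush and release the producer's resources
        producer.close();
    }
}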

3. Consumer code

package org.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class ConsumerDemo {

    private static final String topic = "topictest0416";
    private static final Integer threads = 1;

    public static void main(String[] args) {
        Properties props = new Properties();
        // ZooKeeper ensemble
        props.put("zookeeper.connect", "hadoop1-1:2181,hadoop1-2:2181,hadoop1-3:2181");
        // Consumer group id
        props.put("group.id", "001");
        // Where to start when there is no committed offset; "smallest" means the earliest offset
        props.put("auto.offset.reset", "smallest");

        // Wrap the Properties into a consumer configuration object
        ConsumerConfig config = new ConsumerConfig(props);
        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(config);

        // key: topic to consume, value: number of consumer threads for that topic
        Map<String, Integer> topicMap = new HashMap<>();
        topicMap.put(topic, threads);

        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // One thread per stream, printing every message it receives
        for (final KafkaStream<byte[], byte[]> kafkaStream : streams) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    for (MessageAndMetadata<byte[], byte[]> mm : kafkaStream) {
                        System.out.println(new String(mm.message()));
                    }
                }
            }).start();
        }
    }
}
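Likewise, the consumer can be written against the newer org.apache.kafka.clients.consumer API from kafka-clients. A minimal sketch (the NewConsumerDemo class is illustrative; "earliest" plays the role of "smallest" in the new configuration):

package org.kafka;

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewConsumerDemo {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list; the new consumer does not need ZooKeeper
        props.put("bootstrap.servers", "hadoop1-1:9092,hadoop1-2:9092,hadoop1-3:9092");
        props.put("group.id", "001");
        // Start from the earliest offset when the group has no committed offset
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("topictest0416"));

        // Poll in a loop and print every message value received
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}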

4. Testing

  Start the Consumer first, then start the Producer.

  Test result: the Consumer console should print the ten "I am a producer ... hello!" messages sent by the Producer.
