Java Example

import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Properties;

public class KafkaStreamJoin {

    public static void main(String[] args) throws Exception {
        // Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Set up the Kafka connection properties
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("group.id", "test");

        // Create a FlinkKafkaConsumer for source 1
        FlinkKafkaConsumer<String> kafkaConsumer1 =
                new FlinkKafkaConsumer<>("topic1", new SimpleStringSchema(), props);
        DataStream<String> stream1 = env.addSource(kafkaConsumer1);

        // Create a FlinkKafkaConsumer for source 2
        FlinkKafkaConsumer<String> kafkaConsumer2 =
                new FlinkKafkaConsumer<>("topic2", new SimpleStringSchema(), props);
        DataStream<String> stream2 = env.addSource(kafkaConsumer2);
        // Parse the records, extract timestamps for event-time windowing, and key by the first field
        DataStream<Tuple2<String, Integer>> keyedStream1 = stream1.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String value) throws Exception {
                String[] parts = value.split(",");
                return new Tuple2<>(parts[0], Integer.parseInt(parts[1]));
            }
        }).assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Tuple2<String, Integer>>() {
            @Override
            public long extractAscendingTimestamp(Tuple2<String, Integer> element) {
                // Use the second field as the event timestamp
                return element.f1;
            }
        }).keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) throws Exception {
                return value.f0;
            }
        });

        DataStream<Tuple2<String, Integer>> keyedStream2 = stream2.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String value) throws Exception {
                String[] parts = value.split(",");
                return new Tuple2<>(parts[0], Integer.parseInt(parts[1]));
            }
        }).assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Tuple2<String, Integer>>() {
            @Override
            public long extractAscendingTimestamp(Tuple2<String, Integer> element) {
                return element.f1;
            }
        }).keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) throws Exception {
                return value.f0;
            }
        });
        // Join the two streams on key over a sliding event-time window,
        // combining the values from both sides
        DataStream<Tuple2<String, Integer>> result = keyedStream1.join(keyedStream2)
                .where(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) throws Exception {
                        return value.f0;
                    }
                })
                .equalTo(new KeySelector<Tuple2<String, Integer>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Integer> value) throws Exception {
                        return value.f0;
                    }
                })
                .window(SlidingEventTimeWindows.of(Time.seconds(30), Time.seconds(10)))
                .apply(new JoinFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> join(Tuple2<String, Integer> left, Tuple2<String, Integer> right) {
                        return new Tuple2<>(left.f0, left.f1 + right.f1);
                    }
                });
        // Write the results to MySQL (`key` is a reserved word in MySQL, hence the backticks)
        String sql = "INSERT INTO result (`key`, `count`) VALUES (?, ?)";
        result.addSink(JdbcSink.sink(
                sql,
                (ps, t) -> {
                    ps.setString(1, t.f0);
                    ps.setInt(2, t.f1);
                },
                JdbcExecutionOptions.builder()
                        .withBatchIntervalMs(5000)
                        .build(),
                new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                        .withUrl("jdbc:mysql://localhost:3306/test")
                        .withDriverName("com.mysql.cj.jdbc.Driver")
                        .withUsername("root")
                        .withPassword("password")
                        .build()));

        // Execute the program
        env.execute("KafkaStreamJoin");
    }
}
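
The JDBC sink assumes the target table already exists in MySQL. A minimal one-off helper that creates it, assuming the two-column schema implied by the INSERT statement above:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateResultTable {
    public static void main(String[] args) throws Exception {
        // Connection settings mirror the sink configuration above
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test", "root", "password");
             Statement stmt = conn.createStatement()) {
            // `key` is a reserved word in MySQL, so the columns are backtick-quoted
            stmt.executeUpdate(
                    "CREATE TABLE IF NOT EXISTS result (" +
                    "  `key`   VARCHAR(64) NOT NULL," +
                    "  `count` INT         NOT NULL" +
                    ")");
        }
    }
}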


Using the Flink Table API

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.Properties;

public class KafkaStreamJoin {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("group.id", "test");

        // Parse "key,value,timestamp" records and assign event-time timestamps
        DataStream<Tuple3<String, Integer, Long>> stream1 = env
                .addSource(new FlinkKafkaConsumer<>("stream1", new SimpleStringSchema(), props))
                .map(value -> {
                    String[] parts = value.split(",");
                    return Tuple3.of(parts[0], Integer.parseInt(parts[1]), Long.parseLong(parts[2]));
                })
                .returns(Types.TUPLE(Types.STRING, Types.INT, Types.LONG))
                .assignTimestampsAndWatermarks(WatermarkStrategy
                        .<Tuple3<String, Integer, Long>>forMonotonousTimestamps()
                        .withTimestampAssigner((element, ts) -> element.f2));
        DataStream<Tuple3<String, Integer, Long>> stream2 = env
                .addSource(new FlinkKafkaConsumer<>("stream2", new SimpleStringSchema(), props))
                .map(value -> {
                    String[] parts = value.split(",");
                    return Tuple3.of(parts[0], Integer.parseInt(parts[1]), Long.parseLong(parts[2]));
                })
                .returns(Types.TUPLE(Types.STRING, Types.INT, Types.LONG))
                .assignTimestampsAndWatermarks(WatermarkStrategy
                        .<Tuple3<String, Integer, Long>>forMonotonousTimestamps()
                        .withTimestampAssigner((element, ts) -> element.f2));

        // Register the streams as tables; ts1 and ts2 become rowtime attributes
        tableEnv.createTemporaryView("t1",
                tableEnv.fromDataStream(stream1, "key1, value1, ts1.rowtime"));
        tableEnv.createTemporaryView("t2",
                tableEnv.fromDataStream(stream2, "key2, value2, ts2.rowtime"));

        // MySQL sink table: Flink has no dedicated MySQL table sink, so the
        // generic JDBC connector (flink-connector-jdbc) is used instead;
        // the target table must already exist in MySQL
        tableEnv.executeSql(
                "CREATE TABLE mysql_sink (" +
                "  key1 STRING, value1 INT, value2 INT, cnt BIGINT" +
                ") WITH (" +
                "  'connector' = 'jdbc'," +
                "  'url' = 'jdbc:mysql://localhost:3306/test'," +
                "  'table-name' = 'join_result'," +
                "  'driver' = 'com.mysql.cj.jdbc.Driver'," +
                "  'username' = 'root'," +
                "  'password' = '123456'" +
                ")");

        // Interval join (matches within +/- 5 seconds) followed by a sliding
        // (HOP) window count: 30-second windows sliding every 10 seconds
        Table resultTable = tableEnv.sqlQuery(
                "SELECT key1, value1, value2, COUNT(1) AS cnt " +
                "FROM (" +
                "  SELECT t1.key1, t1.value1, t2.value2, t1.ts1 " +
                "  FROM t1 JOIN t2 ON t1.key1 = t2.key2 " +
                "  AND t2.ts2 BETWEEN t1.ts1 - INTERVAL '5' SECOND AND t1.ts1 + INTERVAL '5' SECOND" +
                ") " +
                "GROUP BY key1, value1, value2, HOP(ts1, INTERVAL '10' SECOND, INTERVAL '30' SECOND)");

        // Write the results to MySQL; executeInsert submits the job directly,
        // so env.execute() is not called
        resultTable.executeInsert("mysql_sink");
    }
}
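
For a quick sanity check without MySQL, the joined rows can also be printed to stdout; a minimal sketch that reuses the t1 and t2 views registered above:

// Debug aid: print the interval-join result instead of inserting into MySQL
tableEnv.sqlQuery(
        "SELECT t1.key1, t1.value1, t2.value2 " +
        "FROM t1 JOIN t2 ON t1.key1 = t2.key2 " +
        "AND t2.ts2 BETWEEN t1.ts1 - INTERVAL '5' SECOND AND t1.ts1 + INTERVAL '5' SECOND")
        .execute()
        .print();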

Maven dependencies for the Java examples

Group ID            Artifact ID                         Version
org.apache.flink    flink-core                          1.12.5
org.apache.flink    flink-streaming-java_2.12           1.12.5
org.apache.flink    flink-table-api-java-bridge_2.12    1.12.5
org.apache.flink    flink-table-planner-blink_2.12      1.12.5
org.apache.flink    flink-connector-kafka_2.12          1.12.5
org.apache.flink    flink-connector-jdbc_2.12           1.12.5
org.apache.kafka    kafka-clients                       2.4.1
mysql               mysql-connector-java                8.0.23
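
The same coordinates as a Maven <dependencies> block:

<dependencies>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-core</artifactId>
<version>1.12.5</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_2.12</artifactId>
<version>1.12.5</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-api-java-bridge_2.12</artifactId>
<version>1.12.5</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-planner-blink_2.12</artifactId>
<version>1.12.5</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka_2.12</artifactId>
<version>1.12.5</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-jdbc_2.12</artifactId>
<version>1.12.5</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.4.1</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>8.0.23</version>
</dependency>
</dependencies>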


Scala Example

import java.sql.PreparedStatement
import java.util.Properties

import org.apache.flink.api.common.functions.JoinFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala._
import org.apache.flink.connector.jdbc.{JdbcConnectionOptions, JdbcSink, JdbcStatementBuilder}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

object FlinkKafkaJoinExample {

  case class SensorReading(id: String, timestamp: Long, temperature: Double)

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Read the two input streams from Kafka
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "localhost:9092")
    properties.setProperty("group.id", "test-group")
    properties.setProperty("auto.offset.reset", "earliest")

    val stream1: DataStream[SensorReading] = env.addSource(
      new FlinkKafkaConsumer[String]("topic1", new SimpleStringSchema(), properties)
    )
      .map(data => {
        val dataArray = data.split(",")
        SensorReading(dataArray(0), dataArray(1).toLong, dataArray(2).toDouble)
      })
      .assignAscendingTimestamps(_.timestamp)

    val stream2: DataStream[SensorReading] = env.addSource(
      new FlinkKafkaConsumer[String]("topic2", new SimpleStringSchema(), properties)
    )
      .map(data => {
        val dataArray = data.split(",")
        SensorReading(dataArray(0), dataArray(1).toLong, dataArray(2).toDouble)
      })
      .assignAscendingTimestamps(_.timestamp)
    // Join the two streams on sensor id over a sliding event-time window
    val joinedStream: DataStream[(String, Double, Double)] = stream1.join(stream2)
      .where(_.id)
      .equalTo(_.id)
      .window(SlidingEventTimeWindows.of(Time.seconds(10), Time.seconds(5)))
      .apply(new JoinFunction[SensorReading, SensorReading, (String, Double, Double)] {
        override def join(first: SensorReading, second: SensorReading): (String, Double, Double) =
          (first.id, first.temperature, second.temperature)
      })

    // Average the two temperatures and keep only readings above 30 degrees
    val resultStream: DataStream[(String, Double)] = joinedStream
      .map(data => (data._1, (data._2 + data._3) / 2))
      .filter(data => data._2 > 30)

    // Write the results to MySQL (requires flink-connector-jdbc)
    resultStream.addSink(JdbcSink.sink(
      "INSERT INTO result_table (id, temperature) VALUES (?, ?)",
      new JdbcStatementBuilder[(String, Double)] {
        override def accept(ps: PreparedStatement, v: (String, Double)): Unit = {
          ps.setString(1, v._1)
          ps.setDouble(2, v._2)
        }
      },
      new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
        .withUrl("jdbc:mysql://localhost:3306/test")
        .withDriverName("com.mysql.cj.jdbc.Driver")
        .withUsername("root")
        .withPassword("password")
        .build()
    ))

    env.execute("Flink Kafka Join Example")
  }
}
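
To exercise the join end to end, matching records can be published to both topics first. A minimal sketch using the kafka-clients producer API (the sensor id and readings are made-up sample data in the id,timestamp,temperature format the example expects):

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class SampleProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        props.setProperty("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            long now = System.currentTimeMillis();
            // Same sensor id on both topics so the windowed join finds a match
            producer.send(new ProducerRecord<>("topic1", "sensor_1," + now + ",35.5"));
            producer.send(new ProducerRecord<>("topic2", "sensor_1," + now + ",36.2"));
        }
    }
}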


Maven dependencies for the Scala example

<dependencies>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_2.11</artifactId>
<version>1.12.2</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-scala_2.11</artifactId>
<version>1.12.2</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-clients_2.11</artifactId>
<version>1.12.2</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka_2.11</artifactId>
<version>1.12.2</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-jdbc_2.11</artifactId>
<version>1.12.2</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>8.0.26</version>
</dependency>
</dependencies>
