Flink timers
Common timers
After an element has been processed based on processing time or event time, you can register a timer, which then fires at the specified time.
The TimerService object held by Context and OnTimerContext provides the following methods (a small sketch showing them in use follows the list):
currentProcessingTime(): Long returns the current processing time.
currentWatermark(): Long returns the timestamp of the current watermark.
registerProcessingTimeTimer(timestamp: Long): Unit registers a processing-time timer for the current key; the timer fires once processing time reaches the registered timestamp.
registerEventTimeTimer(timestamp: Long): Unit registers an event-time timer for the current key; the timer fires and its callback is invoked once the watermark is greater than or equal to the registered timestamp.
deleteProcessingTimeTimer(timestamp: Long): Unit deletes a previously registered processing-time timer; if no timer exists for that timestamp, nothing happens.
deleteEventTimeTimer(timestamp: Long): Unit deletes a previously registered event-time timer; if no timer exists for that timestamp, nothing happens.
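The sketch below only illustrates these calls inside a KeyedProcessFunction; the class name, the key/value types and the 60-second delay are made up for illustration and are not part of the cases that follow.
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;
// Hypothetical function: registers an event-time timer 60 seconds after each element's timestamp
// (assumes timestamps and watermarks have been assigned upstream).
public class TimerServiceSketch extends KeyedProcessFunction<String, Tuple2<String, Integer>, String> {
    @Override
    public void processElement(Tuple2<String, Integer> value, Context ctx, Collector<String> out) throws Exception {
        long processingTime = ctx.timerService().currentProcessingTime(); // current wall-clock time (just to show the getter)
        long watermark = ctx.timerService().currentWatermark();           // current watermark (just to show the getter)
        long fireAt = ctx.timestamp() + 60000;                            // 60s after this element's event time
        ctx.timerService().registerEventTimeTimer(fireAt);
        // a timer that is no longer needed can be removed before it fires:
        // ctx.timerService().deleteEventTimeTimer(fireAt);
    }
    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) throws Exception {
        out.collect("timer fired for key " + ctx.getCurrentKey() + " at " + timestamp);
    }
}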
案例一:基于wordcount 的定时器
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;
public class ProcessTimerDemo {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env= StreamExecutionEnvironment.getExecutionEnvironment();
env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
//hello,1  (input format: word,count)
DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndCount = lines.map(new MapFunction<String, Tuple2<String, Integer>>(){
@Override
public Tuple2<String, Integer> map(String line) throws Exception {
String[] fields = line.split(",");
return Tuple2.of(fields[0], Integer.parseInt(fields[1]));}});
KeyedStream<Tuple2<String, Integer>, String> keyed = wordAndCount.keyBy(tp -> tp.f0);
SingleOutputStreamOperator<Tuple2<String, Integer>> process = keyed.process(new KeyedProcessFunction<String, Tuple2<String, Integer>, Tuple2<String, Integer>>(){
private transient ValueState<Integer> counter;
@Override
public void open(Configuration parameters) throws Exception {
ValueStateDescriptor<Integer> stateDescriptor = new ValueStateDescriptor<>("wc-state", Integer.class);
counter = getRuntimeContext().getState(stateDescriptor);}
@Override
public void processElement(Tuple2<String, Integer> value, Context ctx, Collector<Tuple2<String, Integer>> out) throws Exception {
// get the current processing time
long currentProcessingTime = ctx.timerService().currentProcessingTime();
// register a timer; for a processing-time timer, onTimer is triggered once the processing time of the machine running the subtask is greater than or equal to the registered time
// round up to the next full minute, e.g. 10:14:13 -> 10:15:00
long fireTime = currentProcessingTime - currentProcessingTime % 60000 + 60000;
// register the processing-time timer
ctx.timerService().registerProcessingTimeTimer(fireTime);
Integer currentCount = value.f1;
Integer historyCount = counter.value();
if(historyCount == null){
historyCount = 0;}
int totalCount = historyCount + currentCount;
// update the state
counter.update(totalCount);}
// when the "alarm clock" reaches the registered time, the onTimer method is executed
@Override
public void onTimer(long timestamp, OnTimerContext ctx, Collector<Tuple2<String, Integer>> out) throws Exception {
// the timer fired; emit the current result
Integer value = counter.value();
String currentKey = ctx.getCurrentKey();
// emit (key, count)
out.collect(Tuple2.of(currentKey, value));}});
process.print();
env.execute("timer job");}}
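Note on the behaviour of this example: because the fire time is always rounded up to the next full minute, every element of a key arriving within the same minute registers the same timestamp, and Flink keeps only one timer per key and timestamp, so the running count of each key is emitted at most once per minute.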
Case 2: Timer based on event time
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
/**
* State and timers can only be used in a ProcessFunction on a keyed stream
*/
public class ProcessFunctionWithTimerDemo2 {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env= StreamExecutionEnvironment.getExecutionEnvironment();
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
//1000,hello  (input format: eventTime,word)
DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);
SingleOutputStreamOperator<String> linesWithWaterMark = lines.assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<String>(Time.seconds(0)){
@Override
public long extractTimestamp(String element){
return Long.parseLong(element.split(",")[0]);}});
SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = linesWithWaterMark.map(new MapFunction<String, Tuple2<String, Integer>>(){
@Override
public Tuple2<String, Integer> map(String line) throws Exception {
String word = line.split(",")[1];
return Tuple2.of(word, 1);}});
// group by word with keyBy
KeyedStream<Tuple2<String, Integer>, Tuple> keyed = wordAndOne.keyBy(0);
// no window is defined; call the low-level process method directly
keyed.process(new KeyedProcessFunction<Tuple, Tuple2<String, Integer>, Tuple2<String, Integer>>(){
private transient ListState<Tuple2<String, Integer>> bufferState;
@Override
public void open(Configuration parameters) throws Exception {
ListStateDescriptor<Tuple2<String, Integer>> listStateDescriptor = new ListStateDescriptor<Tuple2<String, Integer>>("list-state",
TypeInformation.of(new TypeHint<Tuple2<String, Integer>>(){}));
bufferState = getRuntimeContext().getListState(listStateDescriptor);}
@Override
public void processElement(Tuple2<String, Integer> value, Context ctx, Collector<Tuple2<String, Integer>> out) throws Exception {
//out.collect(value);
bufferState.add(value);
// get the current event time
Long timestamp = ctx.timestamp();
// e.g. 10:14:13 -> 10:15:00
// all inputs with event times in [10:14:00, 10:15:00) register a timer for 10:15:00
System.out.println("current event time is : " + timestamp);
// register a timer; for an event-time timer, onTimer is triggered once the watermark is greater than or equal to the registered time
long timer = timestamp - timestamp % 60000 + 60000;
System.out.println("next timer is: " + timer);
ctx.timerService().registerEventTimeTimer(timer);}
@Override
public void onTimer(long timestamp, OnTimerContext ctx, Collector<Tuple2<String, Integer>> out) throws Exception {
Iterable<Tuple2<String, Integer>> iterable = bufferState.get();
for(Tuple2<String, Integer> tp : iterable){
out.collect(tp);}
// clear the data buffered in the ListState
bufferState.clear();}}).print();
env.execute();}}
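A quick walk-through of this example (the timestamps and words are chosen only for illustration, with the out-of-orderness of 0 used above): sending 60000,hello buffers (hello,1) and registers an event-time timer for 120000; once a later line such as 120000,flink pushes the watermark to 120000, that timer fires and (hello,1) is emitted, while flink itself gets buffered and registers a new timer for 180000.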
Case 3: Timer based on processing time
// create the bean class CountWithTimestamp with three fields
public class CountWithTimestamp {
public String key;
public long count;
public long lastModified;}
// Splitter implements FlatMapFunction: it splits a line and emits one Tuple2 per word, with f0 the word and f1 equal to 1:
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;
import org.apache.flink.util.StringUtils;
public class Splitter implements FlatMapFunction<String, Tuple2<String, Integer>>{
@Override
public void flatMap(String s, Collector<Tuple2<String, Integer>> collector) throws Exception {
if(StringUtils.isNullOrWhitespaceOnly(s)){
System.out.println("invalid line");
return;}
for(String word : s.split(" ")){
collector.collect(new Tuple2<String, Integer>(word, 1));}}}
// Finally, the main body of the logic: ProcessTime.java, which contains the custom KeyedProcessFunction subclass as well as the main method that is the program entry point
import com.bolingcavalry.Splitter;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.util.Collector;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
* @author will
* @email [email protected]
* @date 2020-05-17 13:43
* @description demo of the KeyedProcessFunction class (the time characteristic is processing time)
*/
public class ProcessTime {
/**
* Subclass of KeyedProcessFunction: it records each word's latest arrival time in the state backend and creates a timer;
* when the timer fires it checks whether 10 seconds have passed since the word last appeared, and if so emits it downstream
*/
static class CountWithTimeoutFunction extends KeyedProcessFunction<Tuple, Tuple2<String, Integer>, Tuple2<String, Long>>{
// custom state
private ValueState<CountWithTimestamp> state;
@Override
public void open(Configuration parameters) throws Exception {
// initialize the state; its name is myState
state = getRuntimeContext().getState(new ValueStateDescriptor<>("myState", CountWithTimestamp.class));}
@Override
public void processElement(
Tuple2<String, Integer> value,
Context ctx,
Collector<Tuple2<String, Long>> out) throws Exception {
// get the current key, i.e. which word this is
Tuple currentKey = ctx.getCurrentKey();
// read this word's myState value from the state backend
CountWithTimestamp current = state.value();
// if myState has never been set for this word, initialize it here
if(current == null){
current = new CountWithTimestamp();
current.key = value.f0;}
// increment the word count
current.count++;
// use the current element's timestamp as the word's last-seen time
current.lastModified = ctx.timestamp();
// write it back to the state backend: the word's count and its last-seen time
state.update(current);
// create a timer for the current word that fires 10 seconds later
long timer = current.lastModified + 10000;
ctx.timerService().registerProcessingTimeTimer(timer);
// print everything so the data can be checked for correctness
System.out.println(String.format("process, %s, %d, lastModified : %d (%s), timer : %d (%s)\n\n",
currentKey.getField(0),
current.count,
current.lastModified,
time(current.lastModified),
timer,
time(timer)));}
/**
* Method executed when the timer fires
* @param timestamp the trigger time of this timer
* @param ctx
* @param out
* @throws Exception
*/
@Override
public void onTimer(
long timestamp,
OnTimerContext ctx,
Collector<Tuple2<String, Long>> out) throws Exception {
// get the current key, i.e. which word this is
Tuple currentKey = ctx.getCurrentKey();
// read this word's myState value
CountWithTimestamp result = state.value();
// flag: has this word been absent for 10 consecutive seconds?
boolean isTimeout = false;
// timestamp is the timer's trigger time; if it still equals lastModified + 10 seconds, the word has not been seen again during those 10 seconds
// (otherwise lastModified would have moved forward), so such words are emitted to the downstream operator
if(timestamp == result.lastModified + 10000){
// emit
out.collect(new Tuple2<String, Long>(result.key, result.count));
isTimeout = true;}
// print the data so the expected behaviour can be verified
System.out.println(String.format("ontimer, %s, %d, lastModified : %d (%s), stamp : %d (%s), isTimeout : %s\n\n",
currentKey.getField(0),
result.count,
result.lastModified,
time(result.lastModified),
timestamp,
time(timestamp),
String.valueOf(isTimeout)));}}
public static void main(String[] args) throws Exception {
final StreamExecutionEnvironment env= StreamExecutionEnvironment.getExecutionEnvironment();
// parallelism 1
env.setParallelism(1);
// processing time
env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
// listen on local port 9999 and read strings
DataStream<String> socketDataStream = env.socketTextStream("localhost", 9999);
// every input word that does not reappear within 10 seconds is emitted by CountWithTimeoutFunction
DataStream<Tuple2<String, Long>> timeOutWord = socketDataStream
// split each incoming line on spaces into words
.flatMap(new Splitter())
// assign timestamps, using the current system time as the timestamp
.assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<Tuple2<String, Integer>>(){
@Override
public long extractTimestamp(Tuple2<String, Integer> element, long previousElementTimestamp){
// use the current system time as the timestamp
return System.currentTimeMillis();}
@Override
public Watermark getCurrentWatermark(){
// no watermark is needed in this example, return null
return null;}})
// partition by the word as the key
.keyBy(0)
// hand the keyed data to the custom KeyedProcessFunction
.process(new CountWithTimeoutFunction());
// words that have not reappeared for more than 10 seconds are printed here
timeOutWord.print();
env.execute("ProcessFunction demo : KeyedProcessFunction");}
public static String time(long timeStamp){
return new SimpleDateFormat("yyyy-MM-dd hh:mm:ss").format(new Date(timeStamp));}}
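A note on the design of case 3: processElement never deletes a previously registered timer, so one word can have several pending timers at once. Correctness comes from the check in onTimer: only a trigger time that still equals lastModified + 10000 proves the word has not been seen since, while any older timer fails the comparison and produces no output. An alternative, using the TimerService methods listed at the top, would be to call deleteProcessingTimeTimer on the previous fire time before registering a new one.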
Notes
The timer priority queue is kept in heap memory by default; jobs with large data volumes that rely heavily on timers can therefore consume a lot of memory, and RocksDB can be used to store the timer state instead (see the sketch after these notes).
To keep the timer callback (onTimer) and normal processing (processElement) thread-safe, Flink synchronizes them: the lock must be acquired before either is invoked, so only one of the two can run at a time. Keep onTimer fast, otherwise the task may block.
Without this synchronization, processElement calling state.update() while onTimer calls state.value() could observe inconsistent state and cause thread-safety problems.
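A minimal sketch of moving timer state into RocksDB (not part of the original article): it assumes the flink-statebackend-rocksdb dependency is on the classpath and uses a made-up checkpoint path; the same switch can also be made in flink-conf.yaml via state.backend.rocksdb.timer-service.factory: ROCKSDB.
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
public class RocksDbTimerSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // hypothetical checkpoint location; replace with your own storage path
        RocksDBStateBackend backend = new RocksDBStateBackend("file:///tmp/flink-checkpoints");
        // store timers in RocksDB instead of the default heap-based priority queue
        backend.setPriorityQueueStateType(RocksDBStateBackend.PriorityQueueStateType.ROCKSDB);
        env.setStateBackend(backend);
        // ... build the job as in the cases above ...
    }
}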