from pyflink.common import SimpleStringSchema, WatermarkStrategy
from pyflink.datastream import StreamExecutionEnvironment, KeyedProcessFunction
from pyflink.datastream.connectors.kafka import KafkaSource, KafkaOffsetsInitializer

# 1. Create the Flink stream execution environment.
env = StreamExecutionEnvironment.get_execution_environment()

# Use a single parallel task so every record goes through one subtask.
# The process function below keeps its state in plain Python dicts,
# which live per-subtask only — higher parallelism would still be
# correct per key (key_by routes each account to one subtask), but the
# sample output order would interleave.
env.set_parallelism(1)

# Kafka source: read string records from topic "fraud", starting at the
# latest offsets, deserializing only the message value (keys ignored).
source = KafkaSource.builder() \
    .set_bootstrap_servers("master:9092") \
    .set_topics("fraud") \
    .set_group_id("my-group") \
    .set_starting_offsets(KafkaOffsetsInitializer.latest()) \
    .set_value_only_deserializer(SimpleStringSchema()) \
    .build()

# Build an unbounded DataStream from the Kafka source. No watermarks are
# needed: the detection below is purely arrival-order based, not event-time.
lines_ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "Kafka Source")

"""
kafka-console-producer.sh --broker-list master:9092 --topic fraud
001,100
001,0.5
001,600
001,800
001,300
002,0.1
001,600
002,800
"""
# 对于一个账户，如果出现小于 $1 美元的交易后紧跟着一个大于 $500 的交易，就输出一个报警信息


# 1. Parse each CSV record "account_id,amount" into an
#    (account_id: str, amount: float) tuple.
def _parse_record(line):
    """Split one Kafka record into (account_id, amount).

    The original lambda called line.split(",") twice per record,
    scanning every line twice; splitting once is both faster and
    readable.
    """
    parts = line.split(",")
    return parts[0], float(parts[1])


fraud_ds = lines_ds.map(_parse_record)

# 2. Partition the stream by account id so all transactions of one
#    account are processed in order by the same keyed instance.
key_by_ds = fraud_ds.key_by(lambda kv: kv[0])


# 3. Detect the pattern: a transaction of less than $1 immediately
#    followed (for the same account) by a transaction of more than $500.
class FraudProcessFunction(KeyedProcessFunction):
    """Flags a small (< $1) transaction immediately followed by a large (> $500) one.

    One instance of this function serves every key of its subtask, so
    per-account state is kept in a single dict keyed by account id.
    NOTE(review): this state is plain Python, not Flink-managed state —
    it is lost on failure/restart; production code should use ValueState.
    """

    def __init__(self):
        super().__init__()
        # account_id -> amount of the pending "small" (< $1) transaction.
        # Presence of a key IS the armed flag; the entry is removed as
        # soon as the account's next transaction arrives. (The original
        # two-dict version — a bool flag dict plus an amount dict —
        # kept stale False/amount entries forever, growing without bound,
        # and the two dicts could in principle drift out of sync.)
        self.pending_small = {}

    # process_element: invoked once per incoming record.
    def process_element(self, kv, ctx: 'KeyedProcessFunction.Context'):
        """Handle one (account_id, amount) record.

        Yields (account_id, small_amount, large_amount) when the current
        record completes the "< $1 then > $500" pattern.
        """
        account_id, amount = kv  # avoid shadowing the builtin `id`

        # Was the previous transaction for this account a small one?
        # pop() reads and resets the flag in one step, freeing the entry.
        small_amount = self.pending_small.pop(account_id, None)
        if small_amount is not None:
            # ... immediately followed by a transaction over $500?
            if amount > 500:
                print("紧跟着一个大于 $500 的交易")
                yield account_id, small_amount, amount

        # A small transaction arms the detector for the next record.
        if amount < 1:
            print("出现小于 $1 美元的交易")
            self.pending_small[account_id] = amount


# Apply the fraud detector per key and print each alert tuple
# (account_id, small_amount, large_amount) to stdout.
key_by_ds.process(FraudProcessFunction()).print()

# Submit the job; blocks for the lifetime of the unbounded streaming job.
env.execute()
