package cn.zd.demo.flink.statebackend;

import cn.zd.demo.flink.statebackend.constant.ConfigConstant;
import cn.zd.demo.flink.statebackend.dto.CustBaseInfoDTO;
import cn.zd.demo.flink.statebackend.dto.CustExtendInfoDTO;
import cn.zd.demo.flink.statebackend.dto.CustInfoDTO;
import com.alibaba.fastjson2.JSON;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.windowing.RichWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.ProcessingTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

import java.sql.Timestamp;
import java.util.Properties;

public class StateBackend {
    /**
     * Streaming job: consumes customer base-info and extend-info JSON messages from two
     * Kafka topics, merges them per customer number inside a short processing-time
     * session window ({@code MyRichWindowFunction}, defined elsewhere in this package),
     * and persists the combined record into MySQL through a JDBC sink.
     *
     * todo:
     *   1. externalize configuration (Kafka servers/topics, DB options, state-backend path)
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Local environment with the web UI, for development; switch back to
        // getExecutionEnvironment() when submitting to a cluster.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        // Checkpoint every 5 seconds with exactly-once semantics.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // File-system state backend. NOTE(review): FsStateBackend is deprecated in newer
        // Flink releases — migrate to HashMapStateBackend + FileSystemCheckpointStorage
        // when upgrading.
        env.setStateBackend(new FsStateBackend("file:///app/flink/statebackend"));
        // Global parallelism; may also be supplied at submit time via "-p <n>". If the
        // cluster cannot provide enough slots the submission fails (dynamic schedulers
        // such as YARN allocate automatically).
        env.setParallelism(2);
        // Operator chaining can be disabled globally ...
//        env.disableOperatorChaining();
        // ... or per operator: startNewChain() starts a new chain, disableChaining()
        // excludes the operator from any chain.

        // First source stream. NOTE(review): keyBy on the raw string before union() has
        // no logical effect downstream (the union result is re-keyed by custNo later);
        // it only forces an extra hash shuffle — candidate for removal, kept as-is.
        DataStream<String> dataStream1 =
                env.addSource(getKafkaDs(ConfigConstant.KAFKA1.SERVER, ConfigConstant.KAFKA1.TOPIC, ConfigConstant.KAFKA1.GROUPID)).keyBy((KeySelector<String, String>) s -> s);
        // Second source stream.
        DataStream<String> dataStream2 = env.addSource(getKafkaDs(ConfigConstant.KAFKA2.SERVER, ConfigConstant.KAFKA2.TOPIC, ConfigConstant.KAFKA2.GROUPID)).keyBy(s -> s);

        // Union both topics, parse each JSON payload into either the base-info or the
        // extend-info half of a CustInfoDTO, then merge the halves per customer number
        // inside a 3-second processing-time session window (fired on every element).
        DataStream<CustInfoDTO> dataStream = dataStream1.union(dataStream2).map((MapFunction<String, CustInfoDTO>) val -> {
                                                            System.out.println("数据转换前结果：" + val);
                                                            CustInfoDTO dto = new CustInfoDTO();
                                                            try {
                                                                // Crude routing: extend-info payloads are identified
                                                                // by the presence of a "remark" field.
                                                                if (val.contains("remark")) {
                                                                    dto.setExtendInfoDTO(JSON.parseObject(val, CustExtendInfoDTO.class));
                                                                } else {
                                                                    dto.setBaseInfoDTO(JSON.parseObject(val, CustBaseInfoDTO.class));
                                                                }
                                                            } catch (Exception e) {
                                                                System.out.println("消息转换CustInfoDTO失败：" + val);
                                                                e.printStackTrace();
                                                                // todo: route unparseable messages to a dead-letter
                                                                // topic/side output instead of dropping them silently
                                                            }
                                                            System.out.println("数据转换后结果：" + JSON.toJSONString(dto));
                                                            return dto;
                                                        // Drop records where parsing produced neither half.
                                                        }).filter(dto -> dto != null && (dto.getBaseInfoDTO() != null || dto.getExtendInfoDTO() != null))
//                .setParallelism(4) // per-operator parallelism
//                .startNewChain() // start a new operator chain
//                .disableChaining() // exclude from chaining
//                .slotSharingGroup("group") // slot sharing group; different groups are fully isolated, default is "default"
                                                        .keyBy(CustInfoDTO::getCustNo).window(ProcessingTimeSessionWindows.withGap(Time.seconds(3))).trigger(CountTrigger.of(1))
                                                        .apply(new MyRichWindowFunction());

        // Typed sink (the original declared a raw SinkFunction, forcing an unchecked
        // cast inside the statement builder).
        SinkFunction<CustInfoDTO> jdbcSink = JdbcSink.sink(
                "insert into tbl_cust_info (cust_No, cust_name, cust_id_no, sex, org_no, remark, c_date) values (?, ?, ?, ?, ?, ?, ?)",
                (statement, dto) -> {
                    System.out.println("开始存储数据到mysql：");
                    // NOTE(review): assumes MyRichWindowFunction only emits records with
                    // BOTH halves populated; a record carrying only one of base/extend
                    // info would NPE here — confirm against the window function.
                    statement.setString(1, dto.getBaseInfoDTO().getCustNo());
                    statement.setString(2, dto.getBaseInfoDTO().getCustName());
                    statement.setString(3, dto.getBaseInfoDTO().getCustIdNo());
                    statement.setInt(4, dto.getBaseInfoDTO().getSex());
                    statement.setString(5, dto.getExtendInfoDTO().getOrgNo());
                    statement.setString(6, dto.getExtendInfoDTO().getRemark());
                    statement.setTimestamp(7, new Timestamp(System.currentTimeMillis()));
                    // BUG FIX: do NOT call statement.execute() here. JdbcSink executes
                    // (and batches) the prepared statement itself; executing inside the
                    // builder wrote every row twice.
                },
                new JdbcConnectionOptions.JdbcConnectionOptionsBuilder().withUrl(ConfigConstant.DB.URL).withPassword(ConfigConstant.DB.USERPWD).withUsername(ConfigConstant.DB.USERNAME)
                                                                        .withDriverName(ConfigConstant.DB.DRIVERCLASS).build());

        dataStream.addSink(jdbcSink).name("save2db");
        System.out.println("流~：" + dataStream + "\t" + dataStream1 + "\t" + dataStream2);
        System.out.println("开始任务Kafka2MySQLWithStatebacked");
        env.execute("Kafka2MySQLWithStatebacked");
    }

    /**
     * Builds a Kafka consumer source for the given broker list, topic and consumer group,
     * deserializing each record value as a plain UTF-8 string.
     *
     * @param kafkaServer Kafka bootstrap servers (host:port list)
     * @param topic       topic to subscribe to
     * @param groupId     consumer group id
     * @return a configured {@link FlinkKafkaConsumer} starting from committed group offsets
     */
    static FlinkKafkaConsumer<String> getKafkaDs(String kafkaServer, String topic, String groupId) {
        System.out.println("开始初始化配置,KAFKA:" + kafkaServer + ",TOPIC:" + topic + ",GROUPID:" + groupId);
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", kafkaServer);
        properties.setProperty("group.id", groupId);
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(topic, new SimpleStringSchema(), properties);
        // BUG FIX: the original called setStartFromLatest() immediately followed by
        // setStartFromGroupOffsets(); the later call overrides the earlier one, so only
        // the group-offsets start mode was ever in effect. The dead call is removed and
        // the effective mode kept.
        kafkaConsumer.setStartFromGroupOffsets();
        return kafkaConsumer;
    }
}
