package com.zhu.app;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.zhu.bean.CutCommentWordCount;
import com.zhu.bean.UserCommentKeyword;
import com.zhu.function.PythonScriptExecute;
import com.zhu.function.PythonScriptExecuteProcessFunction;
import com.zhu.util.ClickHouseUtil;
import com.zhu.util.ClusterKafkaUtil;
import com.zhu.util.DataFormatUtil;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.datastream.WindowedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import java.time.Duration;
import java.util.*;
import java.util.function.BiFunction;


/**
 * SpringBoot 后台 用户添加评论到业务数据库 Mysql -> Maxwell监控到数据库变化
 * -> 动态传输到Kafka 统一topic
 * -> Flink 获取kafka数据源 -> FlinkApp处理(针对每条数据 执行python分词脚本)
 *
 * -> 水位线 watermark create_time -> keyBy 好评差评  -> window (理论上一天的窗口 这里为了看到效果10s输出一次） 设置计算时间(每隔多少时间计算一次）
 * -> 窗口中数据 windowProcessFunction( 分词集合 ) =>
 * 状态编程 保留当前排名最高的词语 累加
 * wordCount =>选取排名最高的三个词语
 * =>转换数据结构 => 写入到mysql中
 * javaBean:  startTime stopTime 窗口起始时间 评论类型(好评 差评) 关键词1 关键词2 关键词3  三个关键词拼接字符串? 不利于指标分析->
 * mysql表结构 id start_time end_time comment keyword1 keyword2 keyword3 date?
 *
 *click的 表 字段 id start_time end_time comment keyword1 keyword2 keyword3 ts
 * 
 * App
 * 执行环境 -> checkpoint -> 过滤数据 ->  转换成jsonObj -> process(执行python脚本) 获得分词集合对象  ArrayList<String>
 *     -> watermark -> keyword  keyBy comment -> window( reduce windowProcessFunction)  -> 写入到ClickHouse 
 */
public class KeyWordProcessApp {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
        streamExecutionEnvironment.setParallelism(4); // matches the kafka topic partition count

        // checkpointing (disabled for local runs; enable for production)
        /*
        streamExecutionEnvironment.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE); // exactly-once
        streamExecutionEnvironment.setStateBackend(new HashMapStateBackend());
        streamExecutionEnvironment.getCheckpointConfig().setCheckpointStorage(ClusterParametersConfig.HDFS_CHECKPOINT_FILE_DIR);  // checkpoints stored in HDFS
        System.setProperty("HADOOP_USER_NAME", "zhu");
        streamExecutionEnvironment.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);  // timeout
        streamExecutionEnvironment.getCheckpointConfig().setMaxConcurrentCheckpoints(2);  // max concurrent checkpoints
        streamExecutionEnvironment.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5 * 1000L));
         */


        //todo read the Maxwell change stream from kafka
        /*
        Maxwell row-change JSON, e.g.:
        {"database":"flink","table":"spu_info","type":"insert","ts":1682429037,
        "xid":784,"commit":true,"data":{"id":13,"spu_name":"zhu","description":"zhu","category3_id":322,"tm_id":3}}
         */
        String topic = "topic_db";  //topic_db
        String groupId = "comment_keyword_process_2023";
        DataStreamSource<String> kafkaDStream = streamExecutionEnvironment.addSource(ClusterKafkaUtil.getFlinkKafkaConsumer(topic, groupId));

        //todo filter + convert: keep only comment_emotions row changes and emit the "data" payload
        SingleOutputStreamOperator<JSONObject> jsonObjDStream = kafkaDStream.flatMap(new FlatMapFunction<String, JSONObject>() {
            @Override
            public void flatMap(String data, Collector<JSONObject> collector) throws Exception {
                try {
                    JSONObject jsonObject = JSON.parseObject(data);
                    // users may also delete comments, which arrives as type "delete" and is ignored here
                    if ("flink".equals(jsonObject.getString("database")) && "comment_emotions".equals(jsonObject.getString("table"))) {
                        String type = jsonObject.getString("type");
                        // NOTE(review): Maxwell emits "insert"/"update"/"delete"; "upsert" looks like a typo
                        // for "update" and would silently drop edited comments — confirm against the producer.
                        if ("insert".equals(type) || "upsert".equals(type)) {
                            JSONObject tableData = jsonObject.getJSONObject("data");
                            collector.collect(tableData);
                        }
                    }
                } catch (Exception e) {
                    // the topic carries changes for many tables; malformed/foreign records are skipped
                    e.printStackTrace();
                }
            }
        });
       // jsonObjDStream.print("json>>>");


        //todo run the python word-segmentation script per comment
        SingleOutputStreamOperator<JSONObject> cutCommentJsonObjDStream = jsonObjDStream.process(new PythonScriptExecuteProcessFunction() {
            // one executor per operator instance: constructing it (or loading config) per element
            // would cause heavy IO and hurt throughput
            final PythonScriptExecute scriptExecute = PythonScriptExecute.getInstance(getPythonProperties());

            @Override
            public void processElement(JSONObject jsonObject, ProcessFunction<JSONObject, JSONObject>.Context context,
                                       Collector<JSONObject> collector) throws Exception {
                String comment = jsonObject.getString("comment");
                ArrayList<String> sysList = new ArrayList<>();
                sysList.add(comment);
                // returns the segmented word list for the comment text
                ArrayList<String> cutComment = scriptExecute.executePythonScript(sysList);
                jsonObject.put("cut", cutComment);
                collector.collect(jsonObject);
            }
        });
        cutCommentJsonObjDStream.print("cut >>>");

         /*
        record shape after segmentation:
        cut >>>:4> {"
        emotion":"正向",
        "cut":["天","今天","去","吃","他家","榴莲","椰子","鸡","兼职","太","好吃","家","人们","无脑"],
        "create_time":"2023-04-27 05:46:54",
        "user_id":"张某人",
        "comment":"天啊，今天去吃了他家的榴莲椰子鸡，兼职太好吃了家人们，无脑冲！！！",
        "id":10026}
         */

        //todo event-time watermark from create_time (2s bounded out-of-orderness); required for event-time windows
        SingleOutputStreamOperator<JSONObject> commentWithWaterMarkDStream = cutCommentJsonObjDStream.assignTimestampsAndWatermarks(WatermarkStrategy.<JSONObject>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                .withTimestampAssigner(new SerializableTimestampAssigner<JSONObject>() {
                    @Override
                    public long extractTimestamp(JSONObject commentJson, long l) {
                        return DataFormatUtil.toTs(commentJson.getString("create_time"), true);
                    }
                }));

        //todo key by sentiment (positive / negative)
        KeyedStream<JSONObject, String> keyedByCommentDStream =
                commentWithWaterMarkDStream.keyBy(jsonObject -> jsonObject.getString("emotion"));

        //todo tumbling event-time window (10s here so results are visible quickly; conceptually one day)
        WindowedStream<JSONObject, String, TimeWindow> windowCommentDStream =
                keyedByCommentDStream.window(TumblingEventTimeWindows.of(Time.seconds(10)));


        /*
        Aggregation strategy: a full ProcessWindowFunction would buffer every record until the window
        fires, so we use an incremental AggregateFunction (word -> count map per emotion) combined with
        a lightweight ProcessWindowFunction that only attaches the window start/end metadata.
         */
        SingleOutputStreamOperator<UserCommentKeyword> resultKeywordDStream = windowCommentDStream.aggregate(
                new AggregateFunction<JSONObject, CutCommentWordCount, UserCommentKeyword>() {
                    @Override
                    public CutCommentWordCount createAccumulator() {
                        return new CutCommentWordCount();
                    }

                    /**
                     * Folds one segmented comment into the accumulator: records the (key-constant)
                     * emotion and increments the count of every segmented word.
                     */
                    @Override
                    public CutCommentWordCount add(JSONObject jsonObject, CutCommentWordCount accCutCommentWordCount) {
                        // all records in this keyed window share the same emotion
                        accCutCommentWordCount.setEmotion(jsonObject.getString("emotion"));

                        HashMap<String, Integer> accCountMap = accCutCommentWordCount.getAccWordCountMap();
                        if (accCountMap == null) {
                            accCountMap = new HashMap<>();
                            accCutCommentWordCount.setAccWordCountMap(accCountMap);
                        }
                        for (Object word : jsonObject.getJSONArray("cut").toArray()) {
                            accCountMap.merge(word.toString(), 1, Integer::sum);
                        }
                        return accCutCommentWordCount;
                    }

                    /**
                     * Builds the output bean from the final accumulator: the three MOST frequent
                     * words for this emotion. Window times and ts are filled in downstream by the
                     * ProcessWindowFunction.
                     *
                     * @param accCutCommentWordCount final accumulator (emotion + word-count map)
                     * @return keyword bean; keyword slots are null when fewer than 3 distinct words exist
                     */
                    @Override
                    public UserCommentKeyword getResult(CutCommentWordCount accCutCommentWordCount) {
                        String emotion = accCutCommentWordCount.getEmotion();
                        HashMap<String, Integer> wordCountMap = accCutCommentWordCount.getAccWordCountMap();
                        if (wordCountMap == null) {
                            wordCountMap = new HashMap<>();
                        }

                        ArrayList<Map.Entry<String, Integer>> wordCountList = new ArrayList<>(wordCountMap.entrySet());
                        // BUGFIX: sort by count DESCENDING — the original ascending (a - b) comparator
                        // selected the three LEAST frequent words; comparingByValue also avoids the
                        // int-subtraction overflow pitfall.
                        wordCountList.sort(Map.Entry.comparingByValue(Comparator.reverseOrder()));
                        // guard against windows with fewer than 3 distinct words (original threw IOOBE)
                        String keyword1 = wordCountList.size() > 0 ? wordCountList.get(0).getKey() : null;
                        String keyword2 = wordCountList.size() > 1 ? wordCountList.get(1).getKey() : null;
                        String keyword3 = wordCountList.size() > 2 ? wordCountList.get(2).getKey() : null;
                        return new UserCommentKeyword(null, null, emotion, keyword1, keyword2, keyword3, null);
                    }

                    /**
                     * Merges two partial accumulators (session/merging or parallel pre-aggregation):
                     * word counts are summed into the second accumulator. Null-safe — a freshly
                     * created accumulator may still carry a null map.
                     */
                    @Override
                    public CutCommentWordCount merge(CutCommentWordCount accCutCommentWordCount1, CutCommentWordCount accCutCommentWordCount2) {
                        HashMap<String, Integer> accWordCountMap1 = accCutCommentWordCount1.getAccWordCountMap();
                        HashMap<String, Integer> accWordCountMap2 = accCutCommentWordCount2.getAccWordCountMap();

                        if (accWordCountMap1 == null) {
                            return accCutCommentWordCount2;
                        }
                        if (accWordCountMap2 == null) {
                            accCutCommentWordCount2.setAccWordCountMap(accWordCountMap1);
                            if (accCutCommentWordCount2.getEmotion() == null) {
                                accCutCommentWordCount2.setEmotion(accCutCommentWordCount1.getEmotion());
                            }
                            return accCutCommentWordCount2;
                        }
                        accWordCountMap1.forEach((word, count) -> accWordCountMap2.merge(word, count, Integer::sum));
                        return accCutCommentWordCount2;
                    }
                },
                new ProcessWindowFunction<UserCommentKeyword, UserCommentKeyword, String, TimeWindow>() {
                    @Override
                    public void process(String emotion, ProcessWindowFunction<UserCommentKeyword,
                            UserCommentKeyword, String, TimeWindow>.Context context, Iterable<UserCommentKeyword> iterable,
                                        Collector<UserCommentKeyword> collector) throws Exception {
                        // the iterable holds exactly one element: the AggregateFunction's final result
                        UserCommentKeyword resultUserCommentKeyWord = iterable.iterator().next();
                        String start = DataFormatUtil.toYmdHms(context.window().getStart());
                        String end = DataFormatUtil.toYmdHms(context.window().getEnd());
                        resultUserCommentKeyWord.setStartTime(start);
                        resultUserCommentKeyWord.setEndTime(end);
                        resultUserCommentKeyWord.setTs(System.currentTimeMillis());  // processing time
                        collector.collect(resultUserCommentKeyWord);
                    }
                }

        );


        //todo alternative approach: full-window ProcessWindowFunction + keyed MapState (kept for reference)
        /*
        windowCommentDStream.process(new ProcessWindowFunction<JSONObject, UserCommentKeyword, String, TimeWindow>() {

            private MapState<String, Integer> wordCountState;

            @Override
            public void process(String comment, ProcessWindowFunction<JSONObject, UserCommentKeyword, String, TimeWindow>.Context context,
                                Iterable<JSONObject> iterable, Collector<UserCommentKeyword> collector) throws Exception {
                if (wordCountState == null || wordCountState.values() == null){

                }
            }

            @Override
            public void open(Configuration parameters) throws Exception {
                super.open(parameters);
                MapStateDescriptor<String, Integer> wordMapDescriptor = new MapStateDescriptor<>("wordCount", String.class, Integer.class);
                wordCountState = getRuntimeContext().getMapState(wordMapDescriptor);
            }
        });
         */


        //todo sink to ClickHouse
        resultKeywordDStream.print("keyword >>>");
        resultKeywordDStream.addSink(ClickHouseUtil.getClickHouseSinkFunction(
                "insert into dws_interaction_keyword_view_window values (?,?,?,?,?,?,?)"
        ));




        streamExecutionEnvironment.execute();

    }
}
