package com.tmsb.es;

import com.alibaba.fastjson.JSON;
import com.tmsb.pojo.ESStockAlarm;
import com.tmsb.pojo.ESStockAvailable;
import com.tmsb.pojo.OrderGoods;
import com.tmsb.sink.AvailableBalanceSink;
import com.tmsb.utils.AlarmProcessWindowFunction;
import com.tmsb.utils.ConfUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.OutputTag;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/**
 * @ Author     ：wxw
 * @ Date       ：Created in 15:15 2020/8/5
 * @ Description：Flink streaming job "stockAvailable": consumes stock-available
 *                events from Kafka, runs windowed alarm processing to a Kafka
 *                sink, and writes filtered balance records to Elasticsearch.
 * @ Modified By：
 * @Version: 1.0
 */
public class StockAvailableAnalysis {

    /**
     * Entry point for the "stockAvailable" streaming job.
     *
     * <p>Pipeline: one Kafka source is consumed twice —
     * <ol>
     *   <li>parsed into {@code ESStockAlarm}, keyed by (user_id, name), aggregated in
     *       10-second event-time tumbling windows by {@code AlarmProcessWindowFunction},
     *       and written to a Kafka topic;</li>
     *   <li>parsed into {@code ESStockAvailable}, filtered to type 1/105, and written
     *       via {@code AvailableBalanceSink}.</li>
     * </ol>
     *
     * @param args unused
     * @throws Exception if the Flink job fails; propagated instead of being swallowed
     *                   so the launcher/orchestrator observes the failure exit
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Fail fast if the checkpoint URI is unusable. The previous version caught the
        // IOException, printed the stack trace and continued, which left the backend
        // null and caused a guaranteed NullPointerException on the next statement.
        RocksDBStateBackend rocksDBStateBackend;
        try {
            // 'true' enables incremental checkpoints.
            rocksDBStateBackend = new RocksDBStateBackend(ConfUtil.getStockAvaAlarmCheckpointUri(), true);
        } catch (IOException e) {
            throw new IllegalStateException(
                    "Failed to create RocksDB state backend at "
                            + ConfUtil.getStockAvaAlarmCheckpointUri(), e);
        }
        rocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM);
        env.setStateBackend(rocksDBStateBackend);

        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        // Keep completed checkpoints when the job is cancelled so it can be restored.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        env.enableCheckpointing(ConfUtil.getCheckpointInterval(), CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(ConfUtil.getCheckpointMinPauseBetween());
        // NOTE(review): parallelism is hard-coded to 6 although ConfUtil.getParallelism()
        // exists — confirm which value production is supposed to use.
        env.setParallelism(6);

        // Restart strategy: failure-rate based, limits configured externally.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(
                ConfUtil.getfailureRate(),
                org.apache.flink.api.common.time.Time.of(ConfUtil.getfailureInterval(), TimeUnit.MINUTES),
                org.apache.flink.api.common.time.Time.of(ConfUtil.getdelayInterval(), TimeUnit.SECONDS)
        ));

        // Kafka source for the stock-available topic (raw JSON strings).
        DataStream<String> streamSource = env
                .addSource(new FlinkKafkaConsumer<>(ConfUtil.getTopicOfAvailable(), new SimpleStringSchema(), ConfUtil.getKafkaProperties()))
                .uid("available-source");

        // Parse the raw JSON into ESStockAlarm; tag each record with the topic name.
        SingleOutputStreamOperator<ESStockAlarm> availableStream = streamSource
                .map(s -> {
                    ESStockAlarm esStockAlarm = JSON.parseObject(s, ESStockAlarm.class);
                    esStockAlarm.setName(ConfUtil.getTopicOfAvailable());
                    return esStockAlarm;
                }).uid("available-map-alarm");

        // Alarm processing: event-time watermarks with 30s out-of-orderness tolerance,
        // keyed 10-second tumbling windows evaluated by AlarmProcessWindowFunction.
        SingleOutputStreamOperator<String> allStream = availableStream
                .assignTimestampsAndWatermarks(
                        new BoundedOutOfOrdernessTimestampExtractor<ESStockAlarm>(Time.seconds(30)) {
                            @Override
                            public long extractTimestamp(ESStockAlarm e) {
                                // Multiply by 1000L to widen to long BEFORE multiplying:
                                // if created_time is an int of epoch seconds, (int * 1000)
                                // would overflow. Assumes created_time is epoch seconds —
                                // TODO confirm against the producer.
                                return e.getCreated_time() * 1000L;
                            }
                        }
                ).uid("available-time-alarm")
                .keyBy("user_id", "name")
                .timeWindow(Time.seconds(10))
                .process(new AlarmProcessWindowFunction())
                .uid("available-process-alarm");

        // Emit alarm strings to Kafka.
        // NOTE(review): the target topic "TEST" looks like a leftover from testing —
        // confirm the production topic (a ConfUtil getter would be consistent with
        // the rest of this job's configuration).
        allStream
                .addSink(new FlinkKafkaProducer<String>(
                        ConfUtil.getBootstrapServers(),
                        "TEST",
                        new SimpleStringSchema()))
                .setParallelism(1)
                .uid("available-sink-alarm");

        // Second consumer of the same source: parse into ESStockAvailable and keep
        // only type 1 and type 105 records.
        SingleOutputStreamOperator<ESStockAvailable> esAvailable = streamSource
                .map(s -> JSON.parseObject(s, ESStockAvailable.class))
                .uid("available-map-balance")
                .returns(ESStockAvailable.class)
                .filter(s -> (s.getType() == 1 || s.getType() == 105))
                .uid("available-filter-balance");

        // Balance records go to the custom sink (parallelism 1 to serialize writes).
        esAvailable.addSink(new AvailableBalanceSink())
                .setParallelism(1)
                .uid("available-sink-balance");

        // Launch the job. Exceptions propagate (previously they were swallowed with
        // printStackTrace, hiding job-submission failures from the caller).
        env.execute("stockAvailable");
    }
}
