package api;

import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;

import java.util.*;

/**
 * Job 2: pre-processes JSON order events with Flink
 * (flattens one order into one record per product line item).
 */
public class SourceFromKafka {

    /**
     * Entry point: consumes raw order JSON from Kafka topic {@code lagoudruid2},
     * flattens each order into one flat record per product line item, and writes
     * the records back to Kafka topic {@code lagoudruid3} as JSON strings.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 5 s; with checkpointing enabled the Kafka consumer
        // commits offsets on checkpoint completion.
        env.enableCheckpointing(5000);

        String topic = "lagoudruid2";
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "linux121:9092,linux122:9092,linux123:9092");
        props.setProperty("group.id", "mygp");

        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<String>(topic, new SimpleStringSchema(), props);
        // Always re-read the topic from the beginning (ignores committed group offsets).
        consumer.setStartFromEarliest();

        DataStreamSource<String> data = env.addSource(consumer);
        System.out.println("...parallelism" + data.getParallelism());

        // Flatten: one incoming order event -> one flat record per product.
        SingleOutputStreamOperator<HashMap<String, Object>> flatMaped =
                data.flatMap(new FlatMapFunction<String, HashMap<String, Object>>() {
                    @Override
                    public void flatMap(String value, Collector<HashMap<String, Object>> collector) throws Exception {
                        // Parse the raw Kafka payload into the original order object.
                        OrignData data = JSON.parseObject(value, OrignData.class);

                        for (Map<String, Object> product : data.products) {
                            HashMap<String, Object> result = new HashMap<>();
                            result.put("ts", data.ts);
                            // BUGFIX: orderId/userId were previously both filled
                            // with data.ts (copy-paste error).
                            result.put("orderId", data.orderId);
                            result.put("userId", data.userId);
                            result.put("orderStatusId", data.orderStatusId);
                            result.put("orderStatus", data.orderStatus);
                            result.put("payModeId", data.payModeId);
                            result.put("payMode", data.payMode);
                            result.put("payment", data.payment);
                            result.put("product", JSON.toJSON(product));
                            collector.collect(result);
                        }
                    }
                });

        // Debug sink: echo the flattened records to stdout.
        flatMaped.print();

        // BUGFIX: the producer uses SimpleStringSchema (serializes String), but the
        // stream carries HashMap records; the original raw-typed sink would fail at
        // runtime with a ClassCastException. Serialize each record to JSON first.
        SingleOutputStreamOperator<String> jsonStream =
                flatMaped.map(new MapFunction<HashMap<String, Object>, String>() {
                    @Override
                    public String map(HashMap<String, Object> record) {
                        return JSON.toJSONString(record);
                    }
                });

        // Publish the pre-processed records to the downstream topic.
        FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<String>(
                "linux121:9092,linux122:9092,linux123:9092", "lagoudruid3",
                new SimpleStringSchema());
        jsonStream.addSink(producer);

        env.execute();
    }
}
