package com.yuninglong.mysql;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.http.HttpHost;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.util.*;

/**
 * @author: yuninglong
 */
public class CdcFromMysql {

    /**
     * Parses a comma-separated list of {@code host:port} entries into an array of
     * {@link HttpHost} instances using the {@code http} scheme.
     *
     * <p>The original implementation assigned {@code null} and then tested
     * {@code == null} (always true — dead branch) and split each entry twice;
     * both redundancies are removed here.
     *
     * @param nodes comma-separated node list, e.g. {@code "host1:9200,host2:9200"}
     * @return one {@link HttpHost} per entry, in input order
     * @throws NumberFormatException if a port segment is not a valid integer
     * @throws ArrayIndexOutOfBoundsException if an entry is missing the {@code :port} part
     */
    public static HttpHost[] loadHostArray(String nodes) {
        String[] split = nodes.split(",");
        HttpHost[] httpHostArray = new HttpHost[split.length];

        for (int i = 0; i < split.length; ++i) {
            // Split each entry exactly once instead of re-splitting for host and port.
            String[] hostAndPort = split[i].split(":");
            httpHostArray[i] = new HttpHost(hostAndPort[0], Integer.parseInt(hostAndPort[1]), "http");
        }

        return httpHostArray;
    }

    /**
     * Builds an Elasticsearch {@link IndexRequest} for one stream element: the raw
     * string is stored under the {@code "data"} field and also used as the document id.
     *
     * <p>NOTE(review): the explicit {@code "my-type"} mapping type is deprecated in
     * Elasticsearch 7 — confirm the target cluster still accepts typed requests.
     *
     * @param element the element to index
     * @return a fully populated index request targeting {@code my-index}
     */
    private static IndexRequest createIndexRequest(String element) {
        Map<String, Object> payload = new HashMap<>();
        payload.put("data", element);

        IndexRequest request = Requests.indexRequest();
        request.index("my-index");
        request.type("my-type");
        request.id(element);
        request.source(payload);
        return request;
    }


    /**
     * Demo entry point: reads text lines from a socket source and forwards each
     * line to an Elasticsearch 7 sink.
     *
     * <p>Fixes over the original: the sink and sink function are now generically
     * typed (the raw types bypassed compile-time checking); the sink function
     * actually indexes each element via {@link #createIndexRequest(String)}
     * (the original body was empty, so nothing was ever written); and the
     * catch-all {@code try/catch} that swallowed failures with
     * {@code printStackTrace()} is gone — {@code main} already declares
     * {@code throws Exception}, so job failures now propagate and fail the process.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        // 1) Initialize the Flink streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        String host = "192.168.202.10";
        int port = 9000;
        // 2) Use a socket text stream as the demo source.
        DataStreamSource<String> orderDataStreamSource = env.socketTextStream(host, port);
        orderDataStreamSource.print();

        List<HttpHost> httpHostList = new ArrayList<>();
        httpHostList.add(new HttpHost("192.168.202.10", 9200, "http"));

        // 3) Elasticsearch sink: index every element of the stream.
        ElasticsearchSink<String> elasticsearchSink =
                new ElasticsearchSink.Builder<>(httpHostList, new ElasticsearchSinkFunction<String>() {
                    @Override
                    public void process(String element, RuntimeContext runtimeContext, RequestIndexer requestIndexer) {
                        requestIndexer.add(createIndexRequest(element));
                    }
                }).build();

        orderDataStreamSource.addSink(elasticsearchSink);

//            DataStreamSource<String> dataStreamSource = mysqlLogic(env);

//            kafkaLogic(env, dataStreamSource);

        env.execute();
    }

    /**
     * Round-trips the given stream through Kafka: writes it to the
     * {@code testInput} topic with a producer, then reads the same topic back
     * with a consumer; both the source stream and the consumed stream are printed.
     *
     * <p>See: https://nightlies.apache.org/flink/flink-docs-release-1.13/zh/docs/connectors/datastream/kafka/
     *
     * @param env              the streaming environment to attach the consumer source to
     * @param dataStreamSource the stream whose elements are written to Kafka
     */
    private static void kafkaLogic(StreamExecutionEnvironment env, DataStreamSource<String> dataStreamSource) {
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "192.168.202.10:9092");

        // Producer writing the source stream to the "testInput" topic.
        FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<String>(
                "testInput",                 // target topic
                new SimpleStringSchema(),    // serialization schema
                kafkaProps);                 // producer configuration
//            FlinkKafkaProducer.Semantic.EXACTLY_ONCE

        dataStreamSource.print();
        dataStreamSource.addSink(producer);

        System.out.println("------------");

        // Consumer reading the same topic back for verification.
        kafkaProps.setProperty("group.id", "test");
        DataStream<String> consumedStream =
                env.addSource(new FlinkKafkaConsumer<>("testInput", new SimpleStringSchema(), kafkaProps));
        consumedStream.print();
    }

    /**
     * Builds a MySQL CDC source for the {@code demo.student} table, emitting
     * change events as JSON strings ({@link JsonDebeziumDeserializationSchema}),
     * attaches it to the environment, prints it with parallelism 1, and returns
     * the resulting stream.
     *
     * <p>NOTE(review): credentials are hard-coded in source — consider moving
     * them to configuration before this leaves demo status.
     *
     * @param env the streaming environment to attach the source to
     * @return the CDC change-event stream
     */
    private static DataStreamSource<String> mysqlLogic(StreamExecutionEnvironment env) {
        String hostname = "192.168.202.10";
        int port = 3301;
        String database = "demo";
        String username = "root";
        String password = "testYnlcom";

        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname(hostname)
                .port(port)
                .databaseList(database)
                .username(username)
                .password(password)
                .tableList("demo.student")
                .serverId("1-100")
                .deserializer(new JsonDebeziumDeserializationSchema())
                .includeSchemaChanges(true) // also emit schema-change events
                .build();

        // Checkpoint every 3 seconds (presumably so the CDC source can commit
        // binlog offsets — confirm against the connector docs).
        env.enableCheckpointing(3000);

        DataStreamSource<String> cdcStream =
                env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySqlParallelSource");
        cdcStream.print().setParallelism(1);
        return cdcStream;
    }


}
