package demo.linkdoris;

import com.alibaba.fastjson.JSONObject;
import com.we.flink.utils.WeKafkaPropertyReader;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;


/**
 * Smoke-test job for the Doris link: reads lines from a debug socket, runs each
 * through a flat map that repeatedly polls Doris via JDBC, and prints the results.
 * Checkpoint state is kept in RocksDB. The Kafka sink below is intentionally
 * disabled (commented out) but preserved as a template for the production wiring.
 */
public class LinkDorisTest {
    /** Classpath location of the Kafka/job property file (production profile). */
    public static final String RELEASEPROP =
            "risk/behavior/repay/kfk_adm_repay_user_label_prod.properties";

    public static final Logger LOG = LoggerFactory.getLogger(LinkDorisTest.class);

    public static void main(String[] args) throws IOException {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        try {
            WeKafkaPropertyReader paramReader = WeKafkaPropertyReader.init(RELEASEPROP);

            /** RocksDB state backend; checkpoint location comes from the property file. */
            env.setStateBackend(new RocksDBStateBackend(paramReader.getRocksDBBackendUrl()));

            /**
             * Checkpoint configuration: exactly-once every 10 minutes with a 1 hour
             * timeout; checkpoints are retained on cancellation so a cancelled job can
             * be restored manually from the last externalized checkpoint.
             */
            CheckpointConfig ckConf = env.getCheckpointConfig();
            ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            ckConf.setCheckpointInterval(10 * 60 * 1000L); // ms
            ckConf.setCheckpointTimeout(60 * 60 * 1000L); // ms
            ckConf.setMaxConcurrentCheckpoints(1);
            ckConf.setMinPauseBetweenCheckpoints(500);
            ckConf.enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
            ckConf.enableUnalignedCheckpoints();

            // Debug source: a netcat-style socket on the dev host.
            DataStreamSource<String> socketSource =
                    env.socketTextStream("rtmdw10.cdh6.app.rrd", 9999);

            SingleOutputStreamOperator<String> out =
                    socketSource.flatMap(new AdmRpyUserLabelRichFlatMapFunc());

            out.print();

//            /** sink to Kafka */
//            String sinkkfkTopic = paramReader.getKfkTopic();
//            Properties sinkProp = new Properties();
//            sinkProp.setProperty("bootstrap.servers", paramReader.getKfkBootStrapServer());
//            sinkProp.setProperty("acks", "all");
//            int sinkkfkPartitions = paramReader.getKfkPartitions();
//
//            FlinkKafkaProducer<String> userLabelAllFlinkKafkaProducer =
//                    new FlinkKafkaProducer<String>(
//                            sinkkfkTopic,
//                            new WeKafkaKeyedSerializationSchema(),
//                            sinkProp,
//                            java.util.Optional.of(new WeKafkaCustomPartitioner()));
//
//            out.addSink(userLabelAllFlinkKafkaProducer).setParallelism(sinkkfkPartitions);

            env.execute(LinkDorisTest.class.getSimpleName());
        } catch (Exception e) {
            // Log with the full cause/stack trace instead of getMessage() + printStackTrace().
            LOG.error("Job failed", e);
        }
    }

    /**
     * Polls Doris in a loop for the latest {@code recv_time} of a fixed test user
     * and logs each value. Never emits downstream records — it exists only to
     * exercise the JDBC connection. NOTE(review): the infinite loop means the first
     * socket line blocks its subtask forever; acceptable for a connectivity test.
     */
    private static class AdmRpyUserLabelRichFlatMapFunc
            extends RichFlatMapFunction<String, String> {
        /** Probe query: latest recv_time for a fixed test user_key. */
        private static final String SQL = "SELECT recv_time FROM gdm.var_eu_sms_inc\n" +
                "where user_key = '000093be4a2eb65ca51b7fa261ad1a0c'\n" +
                "order by dt desc\n" +
                "limit 1";

        @Override
        public void flatMap(String value, Collector<String> out) throws Exception {
            try {
                while (true) {
                    // try-with-resources: the original leaked a Connection,
                    // PreparedStatement and ResultSet on every 200 ms iteration.
                    try (Connection connection = DruidUtils.getConnection();
                            PreparedStatement statement = connection.prepareStatement(SQL);
                            ResultSet resultSet = statement.executeQuery()) {
                        while (resultSet.next()) {
                            LOG.warn("recv_time: {}", resultSet.getString("recv_time"));
                        }
                    }
                    Thread.sleep(200);
                }
            } catch (InterruptedException e) {
                // Restore the interrupt flag so the task can shut down cleanly.
                Thread.currentThread().interrupt();
                LOG.warn("flatMap interrupted, stopping poll loop");
            } catch (Exception e) {
                // Keep the full stack trace; getMessage() alone loses the cause chain.
                LOG.error("flatMap failed", e);
            }
        }
    }

    /**
     * Keys Kafka records by the {@code user_key} field of the JSON payload; the
     * value is the raw JSON string. Both sides are encoded as UTF-8 explicitly
     * (the original used the platform default charset).
     */
    private static class WeKafkaKeyedSerializationSchema
            implements KeyedSerializationSchema<String> {
        @Override
        public byte[] serializeKey(String element) {
            JSONObject jsonObject = JSONObject.parseObject(element);
            String key = jsonObject.getString("user_key");
            // Guard: getString returns null when "user_key" is absent; the original
            // would NPE here. An empty key still routes deterministically below.
            return key == null ? new byte[0] : key.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public String getTargetTopic(String element) {
            // null => use the producer's default topic.
            return null;
        }
    }

    /**
     * Routes each record to a partition derived from the key's string hash.
     * The remainder of {@code hash % n} has magnitude < n, so {@code Math.abs}
     * is safe here even for {@code Integer.MIN_VALUE} hashes.
     */
    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {

        @Override
        public int partition(
                String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            String keyStr = new String(key, StandardCharsets.UTF_8);
            int partition = Math.abs(keyStr.hashCode() % partitions.length);
            if (LOG.isDebugEnabled()) {
                // debug (not info): must match the isDebugEnabled() guard above.
                LOG.debug(
                        " partitions: {} partition: {} key: {}",
                        partitions.length,
                        partition,
                        keyStr);
            }
            return partition;
        }
    }
}
