package dwd ;

import com.datastax.driver.core.Cluster;
import com.fasterxml.jackson.databind.ObjectMapper;

import lib.ApiClient;
import lib.ExtendedConfluentRegistryAvroDeserializationSchema;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.avro.registry.confluent.shaded.io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import org.apache.flink.avro.shaded.org.apache.avro.Schema;
import org.apache.flink.avro.shaded.org.apache.avro.generic.GenericData;
import org.apache.flink.avro.shaded.org.apache.avro.generic.GenericRecord;
import org.apache.flink.formats.avro.AvroRowDataSerializationSchema;
import org.apache.flink.formats.avro.RegistryAvroDeserializationSchema;
import org.apache.flink.formats.avro.RowDataToAvroConverters;
import org.apache.flink.formats.avro.SchemaCoder;
import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema;
import org.apache.flink.formats.avro.registry.confluent.ConfluentSchemaRegistryCoder;
import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.cassandra.CassandraTupleSink;
import org.apache.flink.streaming.connectors.cassandra.ClusterBuilder;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.util.Collector;

import javax.annotation.Nullable;

import java.text.SimpleDateFormat;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.Date;
import java.util.Map;
import java.util.Properties;
import java.util.TimeZone;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static org.apache.flink.table.api.DataTypes.*;


public class IpDtDim {
    public static int local_dt(Date dt) {
        SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
        format.setTimeZone(TimeZone.getTimeZone("GMT+8:00"));
        String dt_s = format.format(dt);

        return Integer.parseInt(dt_s);
    }

    public static int local_tm(Date dt) {
        SimpleDateFormat format = new SimpleDateFormat("HHmmss");
        format.setTimeZone(TimeZone.getTimeZone("GMT+8:00"));
        String tm_s = format.format(dt);
        return Integer.parseInt(tm_s);
    }

    public static int ipToInteger(String ip) {
        int result = 0;
        String ipArr[] = ip.split("\\.");
        for (int i = 0; i < 4; i++) {
            result |= Integer.parseInt(ipArr[i]) << ((3 - i) * 8);
        }
        return result;
    }

    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        String metaServerNodes = args[1] ;
        System.out.println("metaServerNodes:" + metaServerNodes) ;
        Map<String, String> dotenv= ApiClient.getMetaEnv(metaServerNodes) ;


        String kafkas_s = dotenv.getOrDefault("ACCESS_STREAMING_KAFKA_NODES", "127.0.0.1:9092");
        String srs_s = dotenv.getOrDefault("ACCESS_STREAMING_KAFKA_SR_NODES", "127.0.0.1:8081");
        String kafka_buffer_prefix = dotenv.getOrDefault("ACCESS_BUFFER_SCHEMA", "data_buffer_dev");
        String kafka_pipeline_prefix = dotenv.getOrDefault("ACCESS_PIPELINE_SCHEMA", "data_pipeline_dev");
        String cassandra_s = dotenv.getOrDefault("ACCESS_STREAMING_CASSANDRA_NODES", "127.0.0.1:9042");
        String cassandra_prefix = dotenv.getOrDefault("ACCESS_PIPELINE_SCHEMA", "data_pipeline_dev");


        Properties consumer_properties = new Properties();
        consumer_properties.setProperty("bootstrap.servers", kafkas_s);
        consumer_properties.setProperty("group.id", "data_pipeline__ip_dt_dim");
        consumer_properties.setProperty("auto.offset.reset", "earliest");

        StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment() ;

        String reader_schema_str = ApiClient.getMetaCommonSchema(metaServerNodes) ;
        Schema reader_schema = new Schema.Parser().parse(reader_schema_str);
        SourceFunction<GenericData.Array> kafka_source = new FlinkKafkaConsumer<>(
                String.format("%s_%s", kafka_buffer_prefix, "dc_sdk_push"),
                ExtendedConfluentRegistryAvroDeserializationSchema.forGenericArray(reader_schema, srs_s),
                consumer_properties).setCommitOffsetsOnCheckpoints(true);

        String writer_schema_str = "{\"type\":\"record\",\"name\":\"IpDtDim\",\"fields\":[{\"name\":\"ip\",\"type\":\"string\"},{\"name\":\"dt\",\"type\":\"int\"},{\"name\":\"tm\",\"type\":\"int\"}]}";

        Schema writer_schema = new Schema.Parser().parse(writer_schema_str);
        String ip_dt_dim_topic = String.format("%s_%s", kafka_pipeline_prefix, "ip_dt_dim");

        CassandraTupleSink<Tuple4<Integer, Integer, Integer, String>> cassandra_sink = new CassandraTupleSink<>(
                String.format("INSERT INTO %s.ip_dt_dim(ip_id, dt, tm, ip) values (?, ?, ?, ?);", cassandra_prefix),
                new ClusterBuilder() {
                    private static final long serialVersionUID = -1671641202177852775L;

                    @Override
                    protected Cluster buildCluster(Cluster.Builder builder) {
                        String[] hosts = cassandra_s.split(",");
                        String single_ip = hosts[0].split(":")[0];
                        int single_port = Integer.parseInt(hosts[0].split(":")[1]);
                        Cluster cluster = builder.addContactPoint(single_ip).withPort(single_port).build();
                        return cluster;
                    }
                });

        DataStream<Tuple3<String, Integer, Integer>> stream = see.addSource(kafka_source)
                .flatMap((GenericData.Array xs, Collector<Tuple3<String, Integer, Integer>> out) -> {
                    for (GenericRecord platform_record : (GenericData.Array<GenericRecord>) xs) {
                        GenericRecord common_record = (GenericRecord) platform_record.get("common");

                        GenericRecord basic_record = (GenericRecord) common_record.get("basic") ;
                        Object raw_upload_time = basic_record.get("upload_time");
                        int upload_dt = 0;
                        int upload_tm = 0;
                        Date ts = null;
                        if (raw_upload_time instanceof CharSequence && !raw_upload_time.toString().isEmpty()) {
                            String raw_upload_time_s = raw_upload_time.toString();
                            long upload_time_val = Long.parseLong(raw_upload_time_s);
                            ts = new Date(upload_time_val);
                        } else if (raw_upload_time instanceof Long) {
                            ts = new Date((Long) raw_upload_time);
                        } else {
                            ts = new Date();
                        }
                        upload_dt = local_dt(ts);
                        upload_tm = local_tm(ts);

                        for (GenericRecord event_record : (GenericData.Array<GenericRecord>) platform_record
                                .get("events")) {
                            GenericRecord location_info = (GenericRecord) event_record.get("location_info");
                            String ip = location_info.get("ip").toString();
                            if (ip == null || ip.isEmpty())
                                continue;
                            out.collect(new Tuple3<String, Integer, Integer>(ip, upload_dt, upload_tm));
                        }
                    }
                }).returns(Types.TUPLE(Types.STRING, Types.INT, Types.INT));

        stream.map(t -> new Tuple4<Integer, Integer, Integer, String>(ipToInteger(t.f0), t.f1, t.f2, t.f0))
                .returns(Types.TUPLE(Types.INT, Types.INT, Types.INT, Types.STRING))
                .addSink(cassandra_sink);

        DataType dataType = ROW(FIELD("ip", STRING()), FIELD("dt", INT()), FIELD("tm", INT()));
        RowType rowType = (RowType) dataType.getLogicalType();
        stream.map(
            t -> (RowData)GenericRowData.of(StringData.fromString(t.f0), t.f1, t.f2)
        ).addSink(
            new FlinkKafkaProducer<RowData>(kafkas_s, ip_dt_dim_topic,
                new AvroRowDataSerializationSchema(
                    rowType
                  , ConfluentRegistryAvroSerializationSchema.forGeneric(
                      ip_dt_dim_topic + "-value"
                    , AvroSchemaConverter.convertToSchema(rowType) 
                    , srs_s
                    )
                  , RowDataToAvroConverters.createConverter(rowType)
                )
            )
        ) ;

        System.out.println("execute stream environment...");
	
	try {
        	see.execute() ; 
	} catch (Exception e) {
		e.printStackTrace() ;
        }
        System.out.println("unexpected finished!");
    }
}
