package com.navinfo.platform.etl.flink.protocol;

import com.navinfo.opentsp.platform.location.protocol.common.LCLocationData;
import com.navinfo.opentsp.platform.location.protocol.common.RealTimeDataPb;
import com.navinfo.platform.etl.enums.SystemType;
import com.navinfo.platform.etl.flink.custom.BoundedOutOfOrdernessWatermarks;
import com.navinfo.platform.etl.flink.protocol.entity.hy.OdsLocation0200Pdi;
import com.navinfo.platform.etl.flink.protocol.source.KafkaSource;
import com.navinfo.platform.etl.util.ConfigUtils;
import com.navinfo.platform.etl.util.DateUtils;
import com.twitter.chill.protobuf.ProtobufSerializer;
import org.apache.commons.io.Charsets;
import org.apache.commons.io.FileUtils;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.CheckpointingOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connectors.hive.HiveOptions;
import org.apache.flink.runtime.state.StateBackendLoader;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.*;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.util.DynamicCodeLoadingException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.time.Duration;
import java.util.stream.Stream;


/**
 * ETL job for 0200 location data: consumes protobuf location records from Kafka,
 * assigns event time / watermarks, converts them to {@link OdsLocation0200Pdi}
 * rows and continuously inserts them into a partitioned Hive table via Flink SQL.
 *
 * @author: web
 * @date: 2020-09-03
 **/
public class LocationDataHive {
    private static final Logger logger = LoggerFactory.getLogger(LocationDataHive.class);

    /** Entry-point class; not meant to be instantiated. */
    private LocationDataHive() {
    }

    public static void main(String[] args) throws Exception {
        // Bootstrap the stream and table environments from the jar-local config file.
        ParameterTool config = ConfigUtils.loadJarPathConfig(args, "config.properties");
        StreamExecutionEnvironment sEnv = initStreamExecutionEnvironment(config);
        StreamTableEnvironment tEnv = initStreamTableEnvironment(sEnv);

        // Register and select the Hive catalog/database used by all SQL below.
        String hiveConfDir = config.get("hive.conf.dir");
        String defaultCatalog = config.get("system.sql.default.catalog");
        String defaultDatabase = config.get("system.sql.default.database");
        tEnv.registerCatalog(defaultCatalog, new HiveCatalog(defaultCatalog, defaultDatabase, hiveConfDir));
        tEnv.useCatalog(defaultCatalog);
        tEnv.useDatabase(defaultDatabase);

        // Build the Kafka-backed location stream. Parallelism is pinned to the
        // topic partition count so each subtask owns exactly one partition.
        KafkaSource kafkaSource = new KafkaSource(args);
        int partitionSize = kafkaSource.locationPartitionSize();
        DataStream<OdsLocation0200Pdi> locationStream = sEnv.addSource(kafkaSource.locationData()).setParallelism(partitionSize).name("source-kafka-location").uid("source-kafka-location")
            // Drop records whose GPS time is more than ONE DAY in the future
            // (the original comment said "one week" — the code allows one day).
            // gpsDate is presumably epoch seconds (TODO confirm against the proto);
            // multiply by 1000L so the seconds→millis conversion is done in long
            // arithmetic — a plain int*1000 would overflow for current timestamps.
            .filter(point -> point.getGpsDate() * 1000L <= System.currentTimeMillis() + Duration.ofDays(1).toMillis()).setParallelism(partitionSize).name("location-filter").uid("location-filter")
            // Event-time + watermark strategy: 10s bounded out-of-orderness,
            // records older than one day treated as late, idle partitions time
            // out after 5 minutes so they do not stall the watermark.
            .assignTimestampsAndWatermarks(WatermarkStrategy
                    .<LCLocationData.LocationData>forGenerator((ctx) -> new BoundedOutOfOrdernessWatermarks<>(Duration.ofSeconds(10), Duration.ofDays(1)))
                    .withTimestampAssigner((element, recordTimestamp) -> element.getGpsDate() * 1000L)
                    .withIdleness(Duration.ofMinutes(5)))
                    .setParallelism(partitionSize).name("location-generator-watermarks").uid("location-generator-watermarks")
            // Key by GPS hour (yyyyMMddHH) plus terminalId modulo partition count.
            .keyBy(pb -> DateUtils.format(pb.getGpsDate() * 1000L, "yyyyMMddHH") + pb.getTerminalId() % partitionSize)
            // Convert each protobuf record into the Hive sink POJO.
            .map(pb -> OdsLocation0200Pdi.of(pb, partitionSize)).name("location-map").uid("location-map");

        // Expose the stream as a temporary view so it can be referenced in SQL.
        String locationView = "kafka_location";
        tEnv.createTemporaryView(locationView, locationStream);

        // Create the Hive sink table from the bundled DDL file if it does not exist yet.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        String locationName = config.get("hive.location.table.name");
        if (Stream.of(tEnv.listTables()).noneMatch(locationName::equals)) {
            String hiveSql = FileUtils.readFileToString(ConfigUtils.loadTableFile("table/hy/ods_location_0200_pdi.hql"), Charsets.UTF_8);
            // Informational message — log at INFO, not ERROR.
            logger.info("执行的建表语句为：\n{}", hiveSql);
            tEnv.executeSql(hiveSql);
        }

        // Continuous INSERT from the Kafka view into the Hive table. The column
        // list must stay in sync with the DDL in ods_location_0200_pdi.hql.
        String insertHive = "INSERT INTO " + locationName +
            " SELECT tid, alarm, status, longitude, latitude, originalLng, originalLat, height, speed, direction, " +
                "gpsDate, mileage, receiveDate, ispatch, oil, recorderSpeed, starstatus, starnumber, signalstatus, iostatus, " +
                "analogad0, analogad1, signalstrength, satellitenumber, isvalid, additionalarm, batterypower, electricvehicle, alarmfilter, standardMileage, " +
                "standardFuelCon, ecuDate, temperatureadditionVehtype, temperatureadditionTyreposition, temperatureadditionTyrecondition, temperatureadditionTyrepressure, temperatureadditionTyretemperature, temperatureadditionTemalarmthreshold, temperatureadditionTyrehalarmthreshold, temperatureadditionTyrelalarmthreshold, " +
                "temperatureadditionTyrenomimalvalue, analysisdataTurningangle, analysisdataLowoildrivingvalue, analysisdataVehiclespeedfromecu, analysisdataRpmwhenalarming, analysisdataVelocitychangevalue, analysisdataCurrentgearshift, analysisdataCurrentrpm, analysisdataBraketimes, analysisdataClutchtimes, " +
                "analysisdataRetardertimes, analysisdataAbstimes, analysisdataReversetimes, analysisdataDeviceidentity, batteryinfoMotortemperature, batteryinfoMotorcontroltemp, batteryinfoMcufaultcode, batteryinfoBmsstatus, batteryinfoBatteryavetem, batteryinfoBatterycurrent, " +
                "batteryinfoTotalbatteryv, batteryinfoBatteryfaultcode, batteryinfoBatterymaxtem, batteryinfoBatterymintem, batteryinfoBatteryminsoc, batteryinfoMaxchargingv, batteryinfoMaxcharginge, batteryinfoTotalstatus, batteryinfoVehiclespeed, batteryinfoShiftstatus, " +
                "batteryinfoVcufaultcode, batteryinfoEndurancemileage, batteryinfoAcceleratorpedal, batteryinfoBrakepedal, batteryinfoModelinfo, batteryinfoSwitchinfo, batteryinfoElectricattachment, batteryinfoMotorrpm, temalarmlist, alarmidentifylist, " +
                "defenceadditionlist, parkingadditionlist, modulevoltageslist, staytimeparkingadditionlist, speedadditionlist, areaadditionlist, routeadditionlist, canclebreakdownadditionBreakdownlist, breakdownadditionBreakdownlist, statusaddition, " +
                "createTime, staytime, serialnumber, lngtotalremaininggas, dt, hr, hashtid " +
            " FROM " + locationView;

        // Informational message — log at INFO, not ERROR.
        logger.info("导入Hive表{}的ETL语句为：\n{}", locationName, insertHive);
        tEnv.executeSql(insertHive);
    }

    /**
     * Builds and configures the {@link StreamExecutionEnvironment}: event time,
     * exactly-once checkpointing, RocksDB incremental state backend and Kryo
     * serializers for the protobuf payload classes.
     *
     * @param config job parameters loaded from config.properties
     * @return the fully configured environment
     * @throws IOException if the state backend configuration cannot be read
     * @throws DynamicCodeLoadingException if the state backend class cannot be loaded
     */
    public static StreamExecutionEnvironment initStreamExecutionEnvironment(ParameterTool config) throws IOException, DynamicCodeLoadingException {
        String systemTypeKEY = "system.type";
        if (!config.has(systemTypeKEY)) {
            // Mandatory configuration missing: log the error and abort the job.
            logger.error("请配置位置数据0200的ETL任务的系统类型" + systemTypeKEY + "：1东风，2青汽，3一汽，4红岩");
            System.exit(1);
        } else {
            SystemType systemType = SystemType.valueOf(config.getInt(systemTypeKEY));
            // Informational message — log at INFO, not ERROR.
            logger.info("位置数据0200的ETL任务，系统类型：{},{}", systemType.getType(), systemType.getDesc());
        }

        // Create the StreamExecutionEnvironment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Use event time for all time-based operations.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Checkpoint configuration. EXACTLY_ONCE requires barrier alignment;
        // switching to AT_LEAST_ONCE would skip alignment for higher throughput
        // at the cost of possible duplicates.
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        checkpointConfig.setCheckpointInterval(Time.minutes(config.getInt("system.checkpoint.interval")).toMilliseconds());
        checkpointConfig.setCheckpointTimeout(Time.minutes(config.getInt("system.checkpoint.timeout")).toMilliseconds());
        checkpointConfig.setMinPauseBetweenCheckpoints(Time.minutes(config.getInt("system.checkpoint.between")).toMilliseconds());
        checkpointConfig.setMaxConcurrentCheckpoints(1);
        checkpointConfig.setPreferCheckpointForRecovery(true);
        // Number of consecutive checkpoint failures tolerated before the job fails.
        checkpointConfig.setTolerableCheckpointFailureNumber(100);
        // Keep externalized checkpoints after job cancellation for manual recovery.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // State backend: incremental RocksDB checkpoints on HDFS. If HDFS permissions
        // fail, set the environment variable HADOOP_USER_NAME=hdfs.
        Configuration stateConf = new Configuration();
        stateConf.setString(CheckpointingOptions.STATE_BACKEND, "rocksdb");
        stateConf.setBoolean(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, true);
        stateConf.setInteger(CheckpointingOptions.MAX_RETAINED_CHECKPOINTS, 2);
        stateConf.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, config.get("system.checkpoint.dir"));
        stateConf.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, config.get("system.savepoint.dir"));
        env.setStateBackend(StateBackendLoader.loadStateBackendFromConfig(stateConf, ClassLoader.getSystemClassLoader(), logger));

        // Execution parameters: cap max parallelism (key-group count), compress
        // snapshots, and make the config visible to all operators.
        ExecutionConfig executionConfig = env.getConfig();
        executionConfig.setMaxParallelism(400);
        executionConfig.setUseSnapshotCompression(true);
        executionConfig.setGlobalJobParameters(config);

        // Register protobuf serializers so Kryo can handle the PB message classes.
        executionConfig.registerTypeWithKryoSerializer(LCLocationData.LocationData.class, ProtobufSerializer.class);
        executionConfig.registerTypeWithKryoSerializer(RealTimeDataPb.RealTimeData.class, ProtobufSerializer.class);
        return env;
    }

    /**
     * Builds the {@link StreamTableEnvironment} on top of the given stream
     * environment using the Blink planner in streaming mode.
     *
     * @param sEnv the underlying StreamExecutionEnvironment
     * @return the configured StreamTableEnvironment
     */
    public static StreamTableEnvironment initStreamTableEnvironment(StreamExecutionEnvironment sEnv) {
        EnvironmentSettings envSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(sEnv, envSettings);

        TableConfig config = tEnv.getConfig();
        // Retention window for idle state: min 1h, max 12h.
        config.setIdleStateRetentionTime(Time.hours(1), Time.hours(12));
        //config.setLocalTimeZone(ZoneId.of("Asia/Shanghai"));

        Configuration configuration = tEnv.getConfig().getConfiguration();
        // IMPORTANT: must be false so Flink's own StreamingFileSink writes the
        // Parquet/Orc files; the default (true) uses HadoopPathBasedBulkFormatBuilder.
        configuration.set(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_WRITER, false);

        // Tuning knobs left disabled (enable after measuring):
        //configuration.setString("table.exec.mini-batch.enabled", "true"); // mini-batch aggregation to amortize per-record state access
        //configuration.setString("table.exec.mini-batch.allow-latency", "5 s"); // use 5 seconds to buffer input records
        //configuration.setString("table.exec.mini-batch.size", "20000"); // the maximum number of records that can be buffered
        //configuration.setString("table.optimizer.agg-phase-strategy", "TWO_PHASE"); // local aggregation to mitigate data skew
        //configuration.setString("table.optimizer.distinct-agg.split.enabled", "true"); // split distinct aggregates to mitigate distinct skew

        return tEnv;
    }
}
