package com.navinfo.opentsp.platform.computing.analysis.service;

import com.google.common.collect.Lists;
import com.mongodb.spark.MongoSpark;
import com.mongodb.spark.config.WriteConfig;
import com.navinfo.opentsp.platform.computing.analysis.entity.truckload.TruckLoadEstimated;
import com.navinfo.opentsp.platform.computing.analysis.entity.truckload.TruckLoadMonthMetrics;
import com.navinfo.opentsp.platform.computing.analysis.util.DateUtils;
import com.navinfo.opentsp.platform.computing.analysis.util.GsonUtil;
import com.navinfo.opentsp.platform.computing.analysis.util.PropertiesUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
import org.bson.Document;
import scala.Tuple2;

import java.io.IOException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.time.*;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.stream.Collectors;


/**
 * Monthly job that computes each vehicle's minimum/maximum load and
 * fuel-consumption bounds from trip data; results are written to HBase.
 * Serializable because instances are captured by Spark closures.
 */
public class TruckLoadService implements Serializable {
    /**
     * Absolute upper bound for a vehicle's load (fallback/default max).
     */
    private static final int MAXLOAD=1000;
    /**
     * Absolute lower bound for a vehicle's load (fallback/default min).
     */
    private static final int MINLOAD=10;
    /**
     * Absolute upper bound for fuel consumption (fallback/default max).
     */
    private static final int MAXOIL=80;
    /**
     * Absolute lower bound for fuel consumption (fallback/default min).
     */
    private static final int MINOIL=5;
    /**
     * MongoDB trip-statistics collection name prefix; one collection per week
     * (keyed by its Monday).
     */
    private static final String NAME_PREFIX = "StatisticData_";
    /**
     * MongoDB monthly truck-load metrics collection name prefix.
     */
    private static final String COLLECTION_NAME_PREFIX = "TruckLoadMonthMetrics_";
    /**
     * HBase column family name shared by all output columns.
     */
    private static final  byte[] F = Bytes.toBytes("f");
    /**
     * Statistics reference date for this run.
     */
    private Date curDate;
    /**
     * Spark SQL context built from the driver's JavaSparkContext.
     */
    private SQLContext spark;
    /**
     * Fully resolved MongoDB output collection name (set per run).
     */
    private String collectionName = "";
    /**
     * Debug flag; enables verbose stdout logging throughout the job.
     */
    private boolean debug = false;

    /**
     * Initializes the service for one statistics run.
     * @param jsc JavaSparkContext supplied by the driver
     * @param curDate statistics reference date
     */
    public void init(JavaSparkContext jsc, Date curDate) {
        this.spark = new SQLContext(jsc);
        this.curDate = curDate;
        // NOTE(review): bounds were once configurable via properties; currently
        // fixed constants. Kept here in case configurability is restored.
//        this.MAXLOAD = Integer.parseInt(PropertiesUtil.getProperties("truckload.maxload"));
//        this.MINLOAD = Integer.parseInt(PropertiesUtil.getProperties("truckload.minload"));
//        this.MAXOIL = Integer.parseInt(PropertiesUtil.getProperties("truckload.maxoil"));
//        this.MINOIL = Integer.parseInt(PropertiesUtil.getProperties("truckload.minoil"));
        // Missing "monitor.debug" property defaults to false.
        this.debug = Boolean.parseBoolean(PropertiesUtil.getProperties("monitor.debug")==null?
                "false":PropertiesUtil.getProperties("monitor.debug"));
    }

    /**
     * Resolves the MongoDB output collection name for the month containing
     * the given date (prefix + "yyyyMM").
     * @param date any date within the target month
     */
    private void setCollectionName(Date date) {
        String monthSuffix = DateUtils.format(date, DateUtils.DateFormat.YYYYMM);
        this.collectionName = COLLECTION_NAME_PREFIX + monthSuffix;
        if (debug) {
            System.out.println("collectionName => " + this.collectionName);
        }
    }

    /**
     * Executes the full statistics pipeline: read trip rows from parquet,
     * convert to per-terminal load estimates, aggregate monthly min/max
     * load and fuel-consumption metrics, and persist to HBase.
     * @param jsc JavaSparkContext
     */
    public void run(JavaSparkContext jsc) {
        // Convert the start Date to a LocalDate (system default zone).
        ZoneId zoneId = ZoneId.systemDefault();
        Instant startInstant = curDate.toInstant();
        //setCollectionName(curDate);
        LocalDate startLocalDate = startInstant.atZone(zoneId).toLocalDate();

        // Read trip data with load estimates from parquet for the month
        // preceding the statistics date (see getTripRDD).
        JavaRDD<Row> curRdd = getTripRDD(jsc, startLocalDate);

        // NOTE(review): the union over the preceding 4 weeks is disabled.
//        for (int i = 1; i < 5; i++) {
//            curRdd = curRdd.union(getTripRDD(jsc, startLocalDate.minusWeeks(i)));
//        }



        // Convert JavaRDD<Row> into JavaPairRDD<Long, TruckLoadEstimated>.
        JavaPairRDD<Long, TruckLoadEstimated> monthRdd = convertRdd(curRdd, curDate);

        // Compute per-vehicle monthly min/max load and fuel-consumption values.
        JavaRDD<TruckLoadMonthMetrics> loadRdd = computeTruckLoad(monthRdd, curDate);

        // Persist the monthly min/max metrics to HBase.
        saveResult2HBase(loadRdd,jsc);
    }

    /**
     * Formats a LocalDate as a compact "yyyyMM" string (e.g. "201907").
     * @param ld the date to format
     * @return the six-character year-month string
     */
    private String changeLocalDate2String(LocalDate ld) {
        return DateTimeFormatter.ofPattern("yyyyMM").format(ld);
    }

    /**
     * Loads trip rows from parquet for the month PRECEDING the given date
     * and projects the six columns needed downstream.
     * @param jsc JavaSparkContext
     * @param curDate statistics date; data is read for curDate minus one month
     * @return projected trip rows as a JavaRDD
     */
    private JavaRDD<Row> getTripRDD(JavaSparkContext jsc, LocalDate curDate) {
        LocalDate lastmonthDate = curDate.minusMonths(1);
        String yearMonth = changeLocalDate2String(lastmonthDate);

        // Path layout: /tripanalysis/trip/<yyyy>/<MM>/<day>/data/* — TODO confirm
        // the wildcard level really is per-day directories.
        Dataset<Row> statisticDataDF = spark.read().parquet("/tripanalysis/trip/"+yearMonth.substring(0,4)+"/"+
                yearMonth.substring(4)+"/*/data/*");
        // Register as a temp table for the SQL projection below.
        // NOTE(review): registerTempTable is deprecated in newer Spark versions
        // in favor of createOrReplaceTempView — migrate when upgrading.
        statisticDataDF.registerTempTable("sdata");
        // Project only the columns used by convertRdd; column order here fixes
        // the positional indexes (doc.get(0..5)) used downstream.
        Dataset<Row> result = spark.sql("select terminalId,vehicleEstimatedLoad,routeStartTime," +
                "routeEndTime,levelRoadDrivingFuelConsumption,levelRoadDrivingMileage from sdata");

        if (debug) {
            System.out.println(result.count());
            result.show();
            result.printSchema();
        }
        return result.javaRDD();
    }

    /**
     * Converts each trip Row into a (terminalId, TruckLoadEstimated) pair.
     *
     * Positional indexes rely on the projection order in getTripRDD:
     * 0=terminalId, 1=vehicleEstimatedLoad, 2=routeStartTime,
     * 3=routeEndTime, 4=levelRoadDrivingFuelConsumption, 5=levelRoadDrivingMileage.
     *
     * @param rdd projected trip rows
     * @param date statistics date; stamped on every estimate record
     * @return pair RDD keyed by terminal id
     */
    private JavaPairRDD<Long, TruckLoadEstimated> convertRdd(JavaRDD<Row> rdd, Date date) {
        return rdd.mapToPair(new PairFunction<Row, Long, TruckLoadEstimated>() {
            @Override
            public Tuple2<Long, TruckLoadEstimated> call(Row doc) throws Exception {
                TruckLoadEstimated rt = new TruckLoadEstimated();
                rt.setTimeEstimated(date.getTime());
                // Unique terminal (vehicle) identifier.
                rt.setTerminalId(Long.parseLong(String.valueOf(doc.get(0))));
                // Estimated load for this trip.
                rt.setVehicleestimatedload(Integer.parseInt(String.valueOf(doc.get(1))));
                rt.setLevelRoadDrivingFuelConsumption(Integer.parseInt(String.valueOf(doc.get(4))));
                rt.setLevelRoadDrivingMileage(Integer.parseInt(String.valueOf(doc.get(5))));
                rt.setRouteStartTime(Integer.parseInt(String.valueOf(doc.get(2))));
                rt.setRouteEndTime(Integer.parseInt(String.valueOf(doc.get(3))));

                // Derived fuel consumption per distance, rounded to 2 decimals;
                // -1.00 marks "no usable data". Scaling factors 0.1*1000*100
                // presumably convert stored units — TODO confirm units.
                if ((rt.getLevelRoadDrivingFuelConsumption()!=0)&&(rt.getLevelRoadDrivingMileage()!=0)) {
                    // FIX: BigDecimal.valueOf avoids the binary-precision artifacts
                    // of new BigDecimal(double); RoundingMode replaces the
                    // deprecated BigDecimal.ROUND_HALF_UP constant.
                    BigDecimal consumption = BigDecimal.valueOf(
                            rt.getLevelRoadDrivingFuelConsumption() * 0.1 * 1000 * 100
                                    / rt.getLevelRoadDrivingMileage());
                    rt.setLevelRoadDrivingOilConsumption(
                            consumption.setScale(2, RoundingMode.HALF_UP).doubleValue());
                } else {
                    rt.setLevelRoadDrivingOilConsumption(-1.00);
                }
                return new Tuple2<>(doc.getAs("terminalId"), rt);
            }
        });
    }

    /**
     * Computes, for each terminal, the monthly minimum/maximum load and
     * fuel-consumption bounds using an IQR-style outlier fence:
     * limits are Q25/Q75 +/- 1.5*IQR, clamped into [MINLOAD, MAXLOAD]
     * (respectively [MINOIL, MAXOIL]); defaults apply when no valid samples.
     *
     * @param rowPairs estimates keyed by terminal id
     * @param date statistics date stamped onto each metrics record
     * @return one TruckLoadMonthMetrics per terminal
     */
    private JavaRDD<TruckLoadMonthMetrics> computeTruckLoad(JavaPairRDD<Long, TruckLoadEstimated> rowPairs, Date date) {

        JavaPairRDD<Long, Iterable<TruckLoadEstimated>> groups = rowPairs.groupByKey();
        return groups.map(new Function<Tuple2<Long, Iterable<TruckLoadEstimated>>, TruckLoadMonthMetrics>() {
            @Override
            public TruckLoadMonthMetrics call(Tuple2<Long, Iterable<TruckLoadEstimated>> longIterableTuple2) throws Exception {
                List<TruckLoadEstimated> list = Lists.newArrayList(longIterableTuple2._2());
                TruckLoadMonthMetrics rt = new TruckLoadMonthMetrics();
                rt.setTerminalId(longIterableTuple2._1());
                rt.setTimeMetrics(date.getTime());

                // Keep only trips longer than one hour (times are in seconds).
                List<TruckLoadEstimated> myList = list.stream()
                        .filter(b -> (b.getRouteEndTime() - b.getRouteStartTime()) > 3600)
                        .collect(Collectors.toList());

                // Trips with a positive estimated load, sorted ascending by load.
                List<TruckLoadEstimated> loadList = myList.stream()
                        .filter(b -> b.getVehicleestimatedload() > 0)
                        .collect(Collectors.toList());

                // FIX: the original comparator used int subtraction, which can
                // overflow and violate the ordering; comparingInt is overflow-safe.
                loadList.sort(Comparator.comparingInt(TruckLoadEstimated::getVehicleestimatedload));

                int l_percent100 = loadList.size();
                if (debug) {
                    System.out.println("l_percent100 => " + l_percent100);
                }
                if (l_percent100==0) {
                    // No valid load samples: fall back to the global bounds.
                    rt.setMaxload(MAXLOAD);
                    rt.setMinload(MINLOAD);
                } else {
                    // Quartile indexes (floor) into the ascending-sorted list.
                    int l_percent75 = (int) Math.floor(l_percent100*0.75);
                    int l_percent25 = (int) Math.floor(l_percent100*0.25);
                    int l_percent75_load = loadList.get(l_percent75).getVehicleestimatedload();
                    int l_percent25_load = loadList.get(l_percent25).getVehicleestimatedload();
                    // Tukey fences: Q75/Q25 +/- 1.5 * IQR.
                    int upperLimit = (int) Math.floor(l_percent75_load+(l_percent75_load-l_percent25_load)*1.5);
                    int lowerLimit = (int) Math.floor(l_percent25_load-(l_percent75_load-l_percent25_load)*1.5);
                    rt.setMinload(Math.max(getMinload(loadList.get(0).getVehicleestimatedload()),getMinload(lowerLimit)));
                    // Max is clamped by the upper fence and never below the min.
                    rt.setMaxload(Math.max(Math.min(getMaxload(loadList.get(l_percent100-1).getVehicleestimatedload()),getMaxload(upperLimit)),rt.getMinload()));

                    if (debug) {
                        System.out.println("rt.getTerminalId => " + rt.getTerminalId());
                        System.out.println("upperLimit => " + upperLimit);
                        System.out.println("lowerLimit => " + lowerLimit);
                        System.out.println("myList.get(0).getVehicleestimatedload() => " + loadList.get(0).getVehicleestimatedload());
                        System.out.println("myList.get(l_percent100-1).getVehicleestimatedload() => " + loadList.get(l_percent100-1).getVehicleestimatedload());
                    }
                }

                // Trips with a positive derived fuel consumption (-1.00 marks
                // "no data" and is excluded), sorted ascending.
                List<TruckLoadEstimated> oilList = myList.stream()
                        .filter(b -> b.getLevelRoadDrivingOilConsumption() > 0)
                        .collect(Collectors.toList());

                // FIX: the original comparator returned only 1 or -1 and never 0,
                // violating the Comparator contract (TimSort may throw
                // "Comparison method violates its general contract!").
                oilList.sort(Comparator.comparingDouble(TruckLoadEstimated::getLevelRoadDrivingOilConsumption));

                int oil_l_percent100 = oilList.size();
                if (debug) {
                    System.out.println("oil_l_percent100 => " + oil_l_percent100);
                }
                if (oil_l_percent100==0) {
                    // No valid fuel samples: fall back to the global bounds.
                    rt.setMinoil(BigDecimal.valueOf(MINOIL).setScale(2, RoundingMode.HALF_UP).doubleValue());
                    rt.setMaxoil(BigDecimal.valueOf(MAXOIL).setScale(2, RoundingMode.HALF_UP).doubleValue());
                } else {
                    int oil_l_percent75 = (int) Math.floor(oil_l_percent100*0.75);
                    int oil_l_percent25 = (int) Math.floor(oil_l_percent100*0.25);
                    double oil_l_percent75_load = oilList.get(oil_l_percent75).getLevelRoadDrivingOilConsumption();
                    double oil_l_percent25_load = oilList.get(oil_l_percent25).getLevelRoadDrivingOilConsumption();
                    // FIX: BigDecimal.valueOf + RoundingMode replace
                    // new BigDecimal(double) and the deprecated ROUND_HALF_UP.
                    double oil_upperLimit = BigDecimal.valueOf(oil_l_percent75_load+(oil_l_percent75_load-oil_l_percent25_load)*1.5)
                            .setScale(2, RoundingMode.HALF_UP).doubleValue();
                    double oil_lowerLimit = BigDecimal.valueOf(oil_l_percent25_load-(oil_l_percent75_load-oil_l_percent25_load)*1.5)
                            .setScale(2, RoundingMode.HALF_UP).doubleValue();
                    if (debug) {
                        System.out.println("rt.getTerminalId => " + rt.getTerminalId());
                        System.out.println("oil_upperLimit => " + oil_upperLimit);
                        System.out.println("oil_lowerLimit => " + oil_lowerLimit);
                        System.out.println("oilList.get(0).getLevelRoadDrivingOilConsumption() => " + oilList.get(0).getLevelRoadDrivingOilConsumption());
                        System.out.println("oilList.get(oil_l_percent100-1).getLevelRoadDrivingOilConsumption() => " + oilList.get(oil_l_percent100-1).getLevelRoadDrivingOilConsumption());
                    }
                    rt.setMinoil(Math.max(getMinOil(oilList.get(0).getLevelRoadDrivingOilConsumption()),getMinOil(oil_lowerLimit)));
                    rt.setMaxoil(Math.max(Math.min(getMaxOil(oilList.get(oil_l_percent100-1).getLevelRoadDrivingOilConsumption()),getMaxOil(oil_upperLimit)),rt.getMinoil()));
                }

                return rt;
            }
        });
    }

    /**
     * Clamps a candidate minimum fuel-consumption value into [MINOIL, MAXOIL].
     * @param oil candidate value
     * @return the clamped minimum
     */
    private double getMinOil(double oil) {
        // Raise to at least MINOIL, then cap at MAXOIL (same branch conditions
        // as the original nested ternary).
        double floored = oil >= MINOIL ? oil : MINOIL;
        return floored > MAXOIL ? MAXOIL : floored;
    }

    /**
     * Caps a candidate maximum fuel-consumption value at MAXOIL.
     * @param oil candidate value
     * @return the capped maximum
     */
    private double getMaxOil(double oil) {
        if (oil <= MAXOIL) {
            return oil;
        }
        return MAXOIL;
    }

    /**
     * Clamps a candidate minimum load into [MINLOAD, MAXLOAD].
     * @param load candidate value
     * @return the clamped minimum
     */
    private int getMinload(int load) {
        // Raise to at least MINLOAD, then cap at MAXLOAD.
        return Math.min(Math.max(load, MINLOAD), MAXLOAD);
    }

    /**
     * Caps a candidate maximum load at MAXLOAD.
     * @param load candidate value
     * @return the capped maximum
     */
    private int getMaxload(int load) {
        return Math.min(load, MAXLOAD);
    }

    /**
     * Writes the monthly min/max load and fuel-consumption metrics to HBase
     * via the new Hadoop API TableOutputFormat.
     *
     * Row key: the terminal id as a REVERSED decimal string (spreads keys
     * across regions). Column family "f", columns minload/maxload/minoil/maxoil
     * as string-encoded bytes.
     *
     * @param result per-terminal monthly metrics
     * @param jsc JavaSparkContext (unused here; kept for interface stability)
     */
    private void saveResult2HBase(JavaRDD<TruckLoadMonthMetrics> result, JavaSparkContext jsc) {
        Map<String, String> props = PropertiesUtil.getProperties();
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum; set programmatically rather than relying on an
        // hbase-site.xml on the classpath.
        conf.set("hbase.zookeeper.quorum",props.get("hbase.zookeeper.quorum"));
        // ZooKeeper client port, default 2181.
        conf.set("hbase.zookeeper.property.clientPort", props.get("hbase.zookeeper.property.clientPort"));
        conf.set("zookeeper.znode.parent",  props.get("zookeeper.znode.parent"));
        if (debug) {
            conf.set("mapreduce.output.fileoutputformat.outputdir", "/tmp");
            conf.set("hbase.fs.tmp.dir", "/tmp");
        }

        // New Hadoop API job configuration.
        Job newAPIJobConfiguration1;
        try {
            newAPIJobConfiguration1 = Job.getInstance(conf);
        } catch (IOException e) {
            // FIX: previously the exception was only printed and execution fell
            // through to a guaranteed NullPointerException below; fail fast and
            // preserve the original cause instead.
            throw new IllegalStateException("Failed to create Hadoop Job for HBase output", e);
        }

        newAPIJobConfiguration1.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, props.get("hbase.output.monthmetrics.tablename"));
        newAPIJobConfiguration1.setOutputFormatClass(TableOutputFormat.class);

        // Build (rowkey, Put) pairs for HBase.
        JavaPairRDD<ImmutableBytesWritable, Put> hbasePuts = result.mapToPair(
                new PairFunction<TruckLoadMonthMetrics, ImmutableBytesWritable,Put>() {
                    @Override
                    public Tuple2<ImmutableBytesWritable, Put> call(TruckLoadMonthMetrics doc) throws Exception {
                        // StringBuilder replaces StringBuffer: no synchronization
                        // needed for this method-local builder.
                        Put put = new Put(Bytes.toBytes(new StringBuilder(String.valueOf(doc.getTerminalId())).reverse().toString()));
                        put.addColumn(F, Bytes.toBytes("minload"), Bytes.toBytes(String.valueOf(doc.getMinload())));
                        put.addColumn(F, Bytes.toBytes("maxload"), Bytes.toBytes(String.valueOf(doc.getMaxload())));
                        put.addColumn(F, Bytes.toBytes("minoil"), Bytes.toBytes(String.valueOf(doc.getMinoil())));
                        put.addColumn(F, Bytes.toBytes("maxoil"), Bytes.toBytes(String.valueOf(doc.getMaxoil())));

                        return new Tuple2<ImmutableBytesWritable, Put>(new ImmutableBytesWritable(), put);
                    }
                });

        // Save to HBase via Spark's built-in Hadoop dataset API.
        hbasePuts.saveAsNewAPIHadoopDataset(newAPIJobConfiguration1.getConfiguration());
    }

    /**
     * Writes the monthly min/max load metrics to MongoDB (collection set by
     * setCollectionName) using majority write concern.
     * @param result per-terminal monthly metrics
     * @param jsc JavaSparkContext (unused here; kept for interface stability)
     */
    private void saveResult2Mongodb(JavaRDD<TruckLoadMonthMetrics> result, JavaSparkContext jsc) {
        // Per-write overrides: target collection and write concern.
        Map<String, String> writeOverrides = new HashMap<>(16);
        writeOverrides.put("collection", collectionName);
        writeOverrides.put("writeConcern.w", "majority");
        WriteConfig writeConfig = WriteConfig.create(spark).withOptions(writeOverrides);

        // Serialize each metrics entity to JSON and parse it into a BSON Document.
        JavaRDD<Document> documentJavaRDD = result.mapPartitions((FlatMapFunction<Iterator<TruckLoadMonthMetrics>, Document>) iterator -> {
            List<Document> docs = new ArrayList<>();
            int counter = 0;
            while (iterator.hasNext()) {
                TruckLoadMonthMetrics metrics = iterator.next();
                if (metrics == null) {
                    continue;
                }
                String json = GsonUtil.gsonString(metrics);
                docs.add(Document.parse(json));
                if (debug) {
                    counter++;
                    System.out.println("TruckLoadMonthMetrics_" + counter + " : " + json);
                }
            }
            return docs.iterator();
        });
        MongoSpark.save(documentJavaRDD, writeConfig);
    }


}
