package com.sg.java.apps;

import com.sg.java.YHVolt;
import com.sg.java.YHVolt4;
import com.sg.java.entity.CMS_VOLT_CURVE;
import com.sg.java.entity.VoltU1_96;
import com.sg.java.entity.VoltageQualifiedVo;
import com.sg.java.security.SecurityPrepare;
import com.sg.java.util.DateUtils;
import com.sg.java.util.HBaseUtils;
import com.twitter.chill.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import scala.Tuple2;

import java.io.IOException;
import java.lang.reflect.Field;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringJoiner;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Spark batch job: reads yesterday's voltage-curve rows ({@code cms_volt_curve_<yyyyMMdd>})
 * from HBase, computes voltage-qualification results per partition via {@link YHVolt},
 * and writes the results back to HBase (PostgreSQL path currently disabled).
 */
public class RelCal {

    private static final Logger log = LoggerFactory.getLogger(RelCal.class);

    /** Thread-safe formatter for day keys such as {@code 20220716} (table-name suffix format). */
    public static final DateTimeFormatter yyyyMMdd_dtf = DateTimeFormatter.ofPattern("yyyyMMdd");


    /**
     * Baseline SparkConf for this job.
     * NOTE(review): currently unused — {@link #main} builds its own bare SparkConf;
     * either wire this in or keep the two in sync.
     *
     * @return SparkConf with app name, Kryo serialization and driver sizing applied
     */
    public static SparkConf defaultSparkConf() {
        return new SparkConf()
                .setAppName("RelCal")
                // Kryo instead of default Java serialization for closures/data shipped to executors.
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .set("spark.driver.cores", "2")   // driver CPU cores
                .set("spark.driver.memory", "4g") // driver heap size
                // Tuning knobs kept for reference; enable per deployment:
//                .setMaster("yarn")                                // local[*] / spark://host:7077 / yarn
//                .set("spark.submit.deployMode", "client")         // cluster in prod; client for dev (console logs visible)
//                .set("spark.kryo.registrator", "com.huawei.bigdata.spark.examples.MyRegistrator")
//                .set("spark.driver.maxResultSize", "20g")         // cap on results collected back to the driver
//                .set("spark.cores.max", "10")                     // total cores for the whole app
//                .set("spark.executor.cores", "2")                 // cores per executor
//                .set("spark.default.parallelism", "30")           // default tasks per stage (2-3x total cores)
//                .set("spark.executor.memory", "4g")               // memory per executor
//                .set("spark.executor.heartbeatInterval", "600s")  // executor->driver heartbeat interval
//                .set("spark.rpc.askTimeout", "1800s")             // rpc ask timeout
//                .set("spark.network.timeout", "3600s")            // global network timeout
                ;
    }

    /**
     * Entry point: Kerberos login, build the HBase scan over yesterday's table,
     * load it as a Spark RDD and process each partition.
     *
     * @param args unused
     * @throws Exception on Kerberos/Spark/HBase setup failure
     */
    public static void main(String[] args) throws Exception {
        SecurityPrepare.cqEcsKerberosLogin2();
        log.info("driver.java.class.path：{}", System.getProperty("java.class.path"));
        long wholeCourseStart = System.currentTimeMillis();
        log.info("创建sparkConf");
        // BUG FIX: app name was misspelled "ReaCal". NOTE(review): defaultSparkConf() is not used here.
        SparkConf sparkConf = new SparkConf().setAppName("RelCal");
        log.info("创建sparkContext");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        log.info("创建hbaseConf");
        log.info("hadoopConfiguration参数：");
        jsc.hadoopConfiguration().forEach(entry -> log.info(entry.getKey() + "=" + entry.getValue()));
        Configuration hbaseConf = HBaseConfiguration.create(jsc.hadoopConfiguration());
        log.info("hbaseConf参数：");
        hbaseConf.forEach(entry -> log.info(entry.getKey() + "=" + entry.getValue()));
        log.info(HConstants.ZOOKEEPER_CLIENT_PORT + "=" + hbaseConf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
        log.info(HConstants.ZOOKEEPER_QUORUM + "=" + hbaseConf.get(HConstants.ZOOKEEPER_QUORUM));

        // e.g. 20220716 — the day being processed (yesterday).
        String yesterday_DATA_DATE = LocalDate.now().plusDays(-1).format(yyyyMMdd_dtf);
        log.info("计算昨日数据：DATA_DATE=" + yesterday_DATA_DATE);

        String readTableName = HBaseUtils.withNamespace("cms_volt_curve_" + yesterday_DATA_DATE);

        log.info("创建hbase扫描器scan");
        Scan scan = new Scan();
        log.info("查询 info、U 此2列族下的列数据");
        scan.addFamily(Bytes.toBytes("info"));
        scan.addFamily(Bytes.toBytes("U"));
        log.info("扫描器设置筛选器filters");
        log.info("当前列筛选设置：info:DATA_DATE == {}", yesterday_DATA_DATE);
        // Sizing notes from the original author: ~17M rows/day at ~1KB each (~16GB total),
        // readable in one pass with a large driver, or in ~17 batches of ~1M rows.
        // NOTE(review): the 10000 limit below caps the scan and looks like a leftover
        // debug setting — confirm before running this for a full day's data.
        scan.setLimit(10000);
        String scanToString = Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray());
        log.info("scanToString：{}", scanToString);
        hbaseConf.set(TableInputFormat.SCAN, scanToString);
        log.info("指定查询表：{}", readTableName);
        hbaseConf.set(TableInputFormat.INPUT_TABLE, readTableName);

        log.info("spark接入hbase获取rdd");
        long readDataFromHBaseParseToSparkRDDStart = System.currentTimeMillis();

        JavaPairRDD<ImmutableBytesWritable, Result> rdd = jsc.newAPIHadoopRDD(hbaseConf, TableInputFormat.class, ImmutableBytesWritable.class, Result.class);

        log.info("rdd分区数量：{}", rdd.getNumPartitions());
        // NOTE(review): count() triggers a full extra pass over the HBase scan, purely for logging.
        log.info("rdd数据数量：{}", rdd.count());

        log.info("从hbase读取数据转为spark rdd耗时：{}ms", System.currentTimeMillis() - readDataFromHBaseParseToSparkRDDStart);

        log.info("计算RDD每个分区数据");
        long calculateStart = System.currentTimeMillis();
        rdd.foreachPartition((VoidFunction<Iterator<Tuple2<ImmutableBytesWritable, Result>>>) iterator -> {
            log.info("executor.java.class.path：{}", System.getProperty("java.class.path"));
            // SimpleDateFormat is not thread-safe; a fresh instance per partition is safe here.
            SimpleDateFormat yyMMdd_sdf = new SimpleDateFormat("yyyyMMdd");
            List<VoltageQualifiedVo> list = new ArrayList<>();

            log.info("获取redis连接");
            // Known environment issue: https://support.huaweicloud.com/trouble-mrs/mrs_03_0297.html
            // NOTE(review): this connection is authenticated but never read/written below, and the
            // host/password are hard-coded — move to configuration, or drop it if truly unused.
            Jedis jedis = new Jedis("25.64.41.53", 6379);
            log.info("redis已成功连接");
            try {
                jedis.auth("Huawei@169");
                jedis.select(0);
                while (iterator.hasNext()) {
                    Tuple2<ImmutableBytesWritable, Result> item = iterator.next();
                    Result result = item._2;
                    // Rebuild the curve entity from the scanned cells.
                    CMS_VOLT_CURVE cms_volt_curve = new CMS_VOLT_CURVE();
                    cms_volt_curve.setRow(Bytes.toString(result.getRow()));
                    cms_volt_curve.setMETER_ID(Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("METER_ID"))));
                    cms_volt_curve.setDATA_DATE(yyMMdd_sdf.parse(Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("DATA_DATE")))));
                    cms_volt_curve.setPHASE_FLAG(Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("PHASE_FLAG"))));
                    cms_volt_curve.setORG_NO(Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("ORG_NO"))));
                    // BUG FIX: the original compared "U" against Bytes.toString(c.getFamilyArray()).
                    // getFamilyArray() returns the cell's whole backing byte array, so the test
                    // practically never matched and every U:* cell was dropped. CellUtil.cloneFamily(c)
                    // yields exactly the family bytes.
                    result.listCells().stream()
                            .filter(c -> "U".equals(Bytes.toString(CellUtil.cloneFamily(c))))
                            .forEach(c -> {
                                try {
                                    // Qualifier encodes the curve point time; value is the voltage reading.
                                    cms_volt_curve.getCOL_TIME$U().add(new VoltU1_96(Integer.parseInt(
                                            String.valueOf(DateUtils.getPoint(Bytes.toString(CellUtil.cloneQualifier(c))))),
                                            Bytes.toString(CellUtil.cloneValue(c))
                                    ));
                                } catch (Exception e) {
                                    throw new RuntimeException(e);
                                }
                            });
                    VoltageQualifiedVo vo = YHVolt.getResult(cms_volt_curve, yesterday_DATA_DATE);
                    if (vo != null) {
                        list.add(vo);
                    }
                }
            } finally {
                log.info("关闭redis");
                jedis.close(); // always release the connection, even when parsing fails
            }
            log.info("写入hbase");
            writeToHbase(list, VoltageQualifiedVo.class, VoltageQualifiedVo::getId);
//            log.info("写入pgsql");
//            writeToPgSql(list);

        });
        log.info("RDD算子计算耗时：{}ms", System.currentTimeMillis() - calculateStart);

        log.info("处理结束");
        log.info("全程耗时：{}ms", System.currentTimeMillis() - wholeCourseStart);
        // close() also stops the context; the extra explicit stop() was redundant.
        jsc.close();
    }

    /**
     * Writes beans into the HBase table named after the bean class (with namespace applied).
     * The row key comes from {@code rowKeyGenerator}; every declared field is stored as
     * {@code info:<fieldName>} using the field value's {@code toString()}.
     *
     * @param list            beans to persist
     * @param tClass          bean class; simple name becomes the table name, fields become columns
     * @param rowKeyGenerator maps a bean to its HBase row key
     * @param <T>             bean type
     * @throws IOException on HBase connection/DDL/put failure
     */
    public static <T> void writeToHbase(List<T> list, Class<T> tClass, Function<T, String> rowKeyGenerator) throws IOException {
        long s = System.currentTimeMillis();
        // Executor-side: configuration is built from resources on the local classpath.
        Configuration hbaseConf = HBaseConfiguration.create();
        log.info("hbase已成功连接");
        TableName tableName = TableName.valueOf(HBaseUtils.withNamespace(tClass.getSimpleName()));
        log.info("表名：{}", tableName.toString());

        Field[] fields = tClass.getDeclaredFields();
        for (Field field : fields) {
            field.setAccessible(true);
        }

        // try-with-resources: the original leaked Admin and Table, and leaked the
        // Connection whenever any call threw before conn.close().
        try (Connection conn = ConnectionFactory.createConnection(hbaseConf);
             Admin admin = conn.getAdmin()) {
            log.info("synchronized同步创建表：{}", tableName);
            // synchronized only serializes threads inside this JVM; executors on other hosts
            // can still race on createTable — HBase rejects the loser, which is acceptable here.
            synchronized (RelCal.class) {
                if (!admin.isTableAvailable(tableName)) {
                    // BUG FIX: the table used to be created with one column family per bean field,
                    // yet the puts below write everything under the "info" family — so puts against
                    // a freshly created table failed with NoSuchColumnFamilyException. Create the
                    // "info" family the writes actually use.
                    admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
                            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info")).setMaxVersions(3).build())
                            .build());
                }
            }
            List<Put> puts = list.stream().map(item -> {
                Put put = new Put(Bytes.toBytes(rowKeyGenerator.apply(item)));
                for (Field field : fields) {
                    try {
                        Object value = field.get(item);
                        // HBase cannot store nulls; the original NPE'd on any null field,
                        // aborting the whole batch. Null fields are simply not written.
                        if (value != null) {
                            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(field.getName()), Bytes.toBytes(value.toString()));
                        }
                    } catch (IllegalAccessException e) {
                        throw new RuntimeException(e);
                    }
                }
                return put;
            }).collect(Collectors.toList());

            log.info("插入hbase，puts.size：{}", puts.size());
            log.info("获取表：{}", tableName);
            try (Table table = conn.getTable(tableName)) {
                table.put(puts);
            }
        }
        log.info("关闭hbase连接");
        log.info("写入hbase耗时：{}s", (System.currentTimeMillis() - s) / 1000);
    }

    /**
     * Batch-inserts computed results into PostgreSQL table {@code pms3_qkjdy.ESSENTIAL_DATA}.
     * SECURITY FIX: uses a parameterized PreparedStatement — the original concatenated raw
     * values into the SQL string, which breaks on quotes in the data and is SQL-injectable.
     * NOTE(review): connection URL/credentials are hard-coded — move to configuration.
     *
     * @param list rows to insert; a null or empty list is a no-op
     *             (the old code emitted syntactically invalid SQL for an empty list)
     */
    public static void writeToPgSql(List<VoltageQualifiedVo> list) {
        if (list == null || list.isEmpty()) {
            return;
        }
        log.info("获取pgsql连接");
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
        String sql = "insert into pms3_qkjdy.ESSENTIAL_DATA ("
                + "id,sbname,sbid,date_time,dycount,uper_time_array,lower_time_array,valid_date_time,"
                + "monitor_time,max_value,max_value_time,min_value,min_value_time,device_type,device_level,"
                + "org_no,org_name,pid,dy_practical_count,pbtype,up_time,down_time,exception_code,voltage_level,avg"
                + ") values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
        // try-with-resources closes both the PreparedStatement (leaked before) and the Connection.
        try (java.sql.Connection pgsqlConn = DriverManager.getConnection(
                "jdbc:postgresql://25.64.68.232:25432/assetmanagement?", "pms3_qkjdy", "pms3_qkjdy");
             PreparedStatement ps = pgsqlConn.prepareStatement(sql)) {
            for (VoltageQualifiedVo vo : list) {
                int i = 1;
                // Values the original quoted as strings are bound via setObject/String.valueOf;
                // numeric columns (dycount, max/min_value, dy_practical_count, voltage_level, avg)
                // are bound as-is, mirroring the unquoted literals of the old statement.
                ps.setString(i++, String.valueOf(vo.getId()));
                ps.setString(i++, String.valueOf(vo.getSbName()));
                ps.setString(i++, String.valueOf(vo.getSbId()));
                ps.setString(i++, sdf.format(vo.getDateTime()));
                ps.setObject(i++, vo.getDyCount());
                ps.setString(i++, String.valueOf(vo.getUperTimeArray()));
                ps.setString(i++, String.valueOf(vo.getLowerTimeArray()));
                ps.setString(i++, String.valueOf(vo.getValidDataTime()));
                ps.setString(i++, String.valueOf(vo.getMonitorTime()));
                ps.setObject(i++, vo.getMaxValue());
                ps.setString(i++, sdf.format(vo.getMaxValueTime()));
                ps.setObject(i++, vo.getMinValue());
                ps.setString(i++, sdf.format(vo.getMinValueTime()));
                ps.setString(i++, String.valueOf(vo.getDeviceType()));
                ps.setString(i++, String.valueOf(vo.getDeviceLevel()));
                ps.setString(i++, String.valueOf(vo.getOrgNo()));
                ps.setString(i++, String.valueOf(vo.getOrgName()));
                ps.setString(i++, String.valueOf(vo.getPid()));
                ps.setObject(i++, vo.getRealVoltagePoint());
                ps.setString(i++, String.valueOf(vo.getPbtype()));
                ps.setString(i++, String.valueOf(vo.getUpTime()));
                ps.setString(i++, String.valueOf(vo.getDownTime()));
                ps.setString(i++, String.valueOf(vo.getExceptionCode()));
                ps.setObject(i++, vo.getVoltageLevel());
                ps.setObject(i++, vo.getAvg());
                ps.addBatch();
            }
            // One round trip per batch instead of one giant concatenated statement.
            ps.executeBatch();
        } catch (SQLException e) {
            // Log with context instead of printStackTrace(); behavior (swallow and continue) preserved.
            log.error("写入pgsql失败，rows={}", list.size(), e);
        }
    }

}
