package com.shopee.am.tool;

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

import org.apache.hadoop.hbase.client.Put;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static java.lang.System.currentTimeMillis;

/**
 * Initializes and manages the shared {@code JavaSparkContext} and {@code SparkSession},
 * and exports Spark SQL query results to HBase in batches.
 *
 * @author huanqing.cheng
 * @since 2022-01-25
 */
public class SparkTool {

    private static final Logger logger = LoggerFactory.getLogger(SparkTool.class);

    /** Shared context; lazily created by {@link #initSpark()}. Public for legacy callers. */
    public static JavaSparkContext jsc;
    /** Shared session; lazily created by {@link #initSpark()}. Public for legacy callers. */
    public static SparkSession spark;

    private static final String APP_NAME = "tagService";
    /** Number of result rows buffered before each HBase batch write. */
    private static final int BATCH_SIZE = 200;
    /** Column family used for all puts. */
    private static final String COLUMN_FAMILY = "a";

    /** Utility class — not instantiable. */
    private SparkTool() {
    }

    /**
     * Lazily initializes the Spark environment: a Hive-enabled {@link SparkSession}
     * (with Kryo serialization, which Spark documents as significantly faster than
     * Java serialization) plus a {@link JavaSparkContext} wrapping its context.
     * Synchronized so concurrent first calls cannot create duplicate sessions.
     */
    public static synchronized void initSpark() {
        if (jsc == null || spark == null) {
            SparkConf sparkConf = new SparkConf();
            sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
            spark = SparkSession.builder()
                    .appName(APP_NAME)
                    .config(sparkConf)
                    .enableHiveSupport()
                    .getOrCreate();
            jsc = new JavaSparkContext(spark.sparkContext());
        }
    }

    /**
     * Returns the shared {@link JavaSparkContext}, initializing Spark on first use.
     *
     * @return the lazily-initialized JavaSparkContext
     */
    public static JavaSparkContext getJsc() {
        if (jsc == null) {
            initSpark();
        }
        return jsc;
    }

    /**
     * Returns the shared {@link SparkSession}, initializing Spark on first use.
     *
     * @return the lazily-initialized SparkSession
     */
    public static SparkSession getSession() {
        if (spark == null) {
            initSpark();
        }
        return spark;
    }

    /**
     * Runs the given SQL through Spark and writes every result row to HBase in
     * batches of {@value #BATCH_SIZE} rows (3 puts per row: user_id, region, email
     * — assumes the query yields exactly those three string columns in that order;
     * TODO confirm against callers). Spark and HBase resources are released in a
     * {@code finally} block, so they are closed even if the query or a write fails.
     *
     * @param sql Hive SQL whose result set is exported to HBase
     */
    public static void query(String sql) {
        initSpark();
        logger.info("Spark environment initialized");

        long startTime = currentTimeMillis();
        int rowCount = 0;
        int batchCount = 0;
        try {
            Dataset<Row> dataset = spark.sql(sql);
            // Pre-size for one full batch: 3 puts per row.
            List<Put> puts = new ArrayList<>(BATCH_SIZE * 3);
            logger.info("Start writing to HBase...");
            for (Row row : dataset.collectAsList()) {
                puts.add(HBaseTool.createPut(newRowKey(), COLUMN_FAMILY, "user_id", row.getString(0)));
                puts.add(HBaseTool.createPut(newRowKey(), COLUMN_FAMILY, "region", row.getString(1)));
                puts.add(HBaseTool.createPut(newRowKey(), COLUMN_FAMILY, "email", row.getString(2)));

                rowCount++;
                if (rowCount % BATCH_SIZE == 0) {
                    batchCount++;
                    logger.info("Writing batch {}...", batchCount);
                    HBaseTool.putData(puts);
                    puts.clear();
                    logger.info("Batch {} written", batchCount);
                }
            }
            // Flush the final partial batch, if any.
            if (!puts.isEmpty()) {
                HBaseTool.putData(puts);
                logger.info("Final batch written, puts: {}", puts.size());
            }
            logger.info("HBase bulk write finished, rows: {}, elapsed: {}s",
                    rowCount, (currentTimeMillis() - startTime) / 1000);
        } finally {
            // Release resources even when the query or a batch write throws.
            HBaseTool.close();
            close();
        }
    }

    /** Generates a random, dash-free HBase row key. */
    private static String newRowKey() {
        return UUID.randomUUID().toString().replace("-", "");
    }

    /**
     * Closes the shared Spark resources and clears the cached references so a
     * subsequent {@link #initSpark()} can build a fresh session instead of
     * reusing a stopped one.
     */
    public static void close() {
        try {
            if (spark != null) {
                spark.close();
            }
            logger.info("Spark resources closed");
        } catch (Exception e) {
            logger.error("Failed to close Spark resources: ", e);
        } finally {
            // Drop stale references; a closed session must not be handed out again.
            spark = null;
            jsc = null;
        }
    }

}
