package com.atguigu.userprofile.task;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.constant.ConstCode;
import com.atguigu.userprofile.dao.TagInfoDao;
import com.atguigu.userprofile.util.ClickhouseUtil;
import com.atguigu.userprofile.util.MyPropsUtil;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * 将Hive中的标签宽表迁移到Clickhouse中
 *
 * 任务步骤：
 * 1. 获取外部传入的参数  taskId 、 busiDate
 *
 * 2. 明确宽表的列?  通过要计算的标签来确定
 *
 * 3. 创建宽表
 *
 * 4. 从hive中查询数据， 插入到Clickhouse中
 *   4.1 select(Hive)
 *   4.2 insert(Clickhouse)
 *
 * 5. 准备SparkSql环境 ， 执行SQL
 *
 */
public class TaskExport {

    public static void main(String[] args) {
        //1. 获取外部传入的参数  taskId 、 busiDate
        String taskId = args[0] ;
        String busiDate = args[1] ;

        //2. 明确宽表的列?  通过要计算的标签来确定
        List<TagInfo> tagInfos = TagInfoDao.selectTagInfosWithTaskEnable();

        //3. 创建宽表
        /*
           create table if not exists  [ckDbName].[tableName]
           (
             uid String ,
             [tagColumns]
           )
           engine = MergeTree
           order by (uid)

           引擎选择:
              MergeTree :
              ReplacingMergeTree :  去重 => 幂等。  一般会在实时处理中重点考虑幂等问题。 离线处理一般直接将计算失败的残留数据删除即可.
              SummingMergeTree :  预聚合
              Replicatedxxxx :  副本 => 数据可靠。
         */

        String ckDbName = MyPropsUtil.get(ConstCode.CLICKHOUSE_DBNAME) ;
        String tableName = "tag_merge_" + busiDate.replace("-", "") ;
        String tagColumns = tagInfos.stream().map(
                tagInfo -> tagInfo.getTagCode().toLowerCase() + " String "
        ).collect(Collectors.joining(" , "));

        String dropTable = " drop table if exists " + ckDbName + "." + tableName ;

        String createTable =
                " create table if not exists  " + ckDbName + "." + tableName +
                " (" +
                " uid String , " + tagColumns +
                " )" +
                " engine=MergeTree " +
                " order by (uid)";
        System.out.println("createTable ==> " + createTable);

        ClickhouseUtil.executeSql(dropTable);
        ClickhouseUtil.executeSql(createTable);

        //4. 从hive中查询数据， 插入到Clickhouse中
        // 4.1 select(Hive)
        String upDBName = MyPropsUtil.get(ConstCode.USER_PROFILE_DBNAME) ;
        String selectTagColumns = tagInfos.stream().map(
                tagInfo -> tagInfo.getTagCode().toLowerCase()
        ).collect(Collectors.joining(" , "));
        String selectSql = " select uid , " + selectTagColumns + " from " + upDBName + "." + tableName ;
        SparkConf sparkConf = new SparkConf().setAppName("task_export_app");//setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();
        Dataset<Row> dataset = sparkSession.sql(selectSql);

        // 4.2 insert(Clickhouse)
        String ckUrl = MyPropsUtil.get(ConstCode.CLICKHOUSE_URL) ;
        dataset.write()
                .mode(SaveMode.Append)
                .option("driver" , "ru.yandex.clickhouse.ClickHouseDriver")
                .option("numPartitions" , "4")
                .option("batchsize" , "1000")
                .option("isolationLevel" , "NONE")
                .jdbc(ckUrl , tableName ,new Properties());
    }
}
