package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.dao.TagInfoDao;
import com.atguigu.userprofile.util.MyClickhouseUtil;
import com.atguigu.userprofile.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskOutApp {

    /**
     * Exports the user-profile tag wide table for one business date into ClickHouse.
     * <p>
     * Flow: load config → read the enabled-tag list → (re)create a per-date
     * ClickHouse table (drop first so reruns are idempotent) → read the Hive wide
     * table via Spark SQL → bulk-write it to ClickHouse over JDBC.
     *
     * @param args args[1] is the business date, formatted {@code yyyy-MM-dd}.
     *             NOTE(review): the date is read from index 1, not 0 — confirm this
     *             matches the scheduler/submit script's argument order.
     */
    public static void main(String[] args) {
        // Fail fast with a usable message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "expected at least 2 arguments (business date at index 1), got " + args.length);
        }

        Properties properties = MyPropertiesUtil.load("config.properties");
        String upDBName = properties.getProperty("user-profile.dbname");
        String clickhouseUrl = properties.getProperty("clickhouse.url");

        // Build the Spark session; master is supplied by spark-submit in production.
        SparkConf sparkConf = new SparkConf().setAppName("TaskOutApp");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            //1. Business date passed in by the scheduler.
            String busiDate = args[1];

            //2. Query the tag detail rows for every enabled tag.
            List<TagInfo> tagInfoWithOnList = TagInfoDao.getTagInfoWithOn();

            //3. Dynamically assemble the ClickHouse CREATE TABLE statement, e.g.
            /**
             *  create table if not exists user_profile.up_tag_merge_20200614
             *   (uid String, tg_person_base_gender String,tg_person_base_agegroup String)
             *   engine=MergeTree
             *   order by uid
             */

            // Turn each enabled tag's tag_code into a "<code> String" column definition.
            List<String> tagCodeFieldList = tagInfoWithOnList.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " String")
                    .collect(Collectors.toList());
            String tagCodeFieldSQL = StringUtils.join(tagCodeFieldList, ",");

            String tableName = "up_tag_merge_" + busiDate.replace("-", "");

            // Drop any previous copy of the table before rebuilding so reruns are
            // idempotent and cannot keep stale data.
            // Fix: qualify with the database name, matching the CREATE statement below;
            // the unqualified form targeted whatever the connection's default DB was.
            String dropTableSQL = "drop table if exists " + upDBName + "." + tableName;
            MyClickhouseUtil.executeSql(dropTableSQL);

            String createTableSQL = "create table if not exists " + upDBName + "." + tableName + " \n" +
                    "          (uid String, " + tagCodeFieldSQL + ") \n" +
                    "          engine=MergeTree \n" +
                    "          order by uid";

            System.out.println(createTableSQL);

            // Create the per-date table in ClickHouse.
            MyClickhouseUtil.executeSql(createTableSQL);

            //4. Read the Hive wide table for this date.
            // NOTE(review): assumes the Hive wide table shares the same db.table name
            // as the ClickHouse target — confirm against the upstream merge job.
            Dataset<Row> dataset = sparkSession.sql("select * from " + upDBName + "." + tableName);

            //5. Write the rows into ClickHouse.
            // Fix: write to the fully-qualified table so the target matches the one
            // created above regardless of the JDBC URL's default database.
            dataset.write().mode(SaveMode.Append)
                    .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                    .option("batchsize", 500)  // batch inserts: fewer round trips, fewer MergeTree parts
                    .option("isolationLevel", "NONE")  // ClickHouse has no transactions
                    .option("numPartitions", "4")  // write concurrency
                    .jdbc(clickhouseUrl, upDBName + "." + tableName, new Properties());
        } finally {
            // Release the Spark application's resources even if the export fails.
            sparkSession.stop();
        }
    }
}
