package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.dao.TagInfoDAO;
import com.atguigu.userprofile.util.MyClickhouseUtil;
import com.atguigu.userprofile.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskExportApp {

    /**
     * Batch job that exports the enabled user-profile tags for one business
     * date from a Hive wide table into a ClickHouse wide table of the same
     * name ({@code user_tag_merge_yyyyMMdd}).
     *
     * <p>Steps:
     * <ol>
     *   <li>Query the enabled tag list ({@link TagInfoDAO#getTagInfoListWithOn()}).</li>
     *   <li>Drop and recreate the per-day ClickHouse wide table — one
     *       {@code String} column per enabled tag plus the {@code uid} key.</li>
     *   <li>Read the Hive wide table into a {@code Dataset<Row>}.</li>
     *   <li>Append the dataset into ClickHouse over JDBC.</li>
     * </ol>
     *
     * @param args args[1] must be the business date in {@code yyyy-MM-dd} form
     *             (args[0] is reserved by the caller's submit script —
     *             NOTE(review): confirm against the scheduler; only index 1 is read here)
     * @throws IllegalArgumentException if the business-date argument is missing or blank
     */
    public static void main(String[] args) {

        // Fail fast with a clear usage message instead of an opaque
        // ArrayIndexOutOfBoundsException when the date argument is missing.
        if (args.length < 2 || StringUtils.isBlank(args[1])) {
            throw new IllegalArgumentException(
                    "missing business date: expected args[1] = yyyy-MM-dd");
        }
        String busiDate = args[1];

        SparkConf sparkConf = new SparkConf().setAppName("task_export_app");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            //1 查询启用的标签集合 — the enabled tags drive the wide table's column list.
            List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoListWithOn();

            // One table per day, no partitioning. MergeTree (a non-deduplicating
            // engine) is fine because idempotence comes from drop-and-recreate:
            // re-running a day wipes that day's table first.
            String tableName = "user_tag_merge_" + busiDate.replace("-", "");

            //2 建表：drop (if present) and recreate the ClickHouse wide table.
            recreateClickhouseTable(tableName, tagInfoList);

            //3 读：read the Hive wide table of the same name.
            Properties properties = MyPropertiesUtil.load("config.properties");
            String upName = properties.getProperty("user-profile.dbname");
            String clickhouseURL = properties.getProperty("clickhouse.url");

            Dataset<Row> dataset = sparkSession.sql("select * from " + upName + "." + tableName);

            //4 写：append into ClickHouse over JDBC.
            dataset.write().mode(SaveMode.Append)
                    .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                    .option("batchsize", "1000")
                    .option("isolationLevel", "NONE")   // 事务关闭 — ClickHouse has no transactions
                    .option("numPartitions", "4")       // 设置并发 — caps concurrent JDBC writers
                    .jdbc(clickhouseURL, tableName, new Properties());
        } finally {
            // Always release the Spark context, even if the export throws,
            // so the YARN/standalone resources are freed.
            sparkSession.stop();
        }
    }

    /**
     * Drops the per-day ClickHouse table if it exists and recreates it with
     * one lowercase {@code String} column per enabled tag plus the {@code uid}
     * ordering key.
     *
     * @param tableName   target table, e.g. {@code user_tag_merge_20240101}
     * @param tagInfoList enabled tags whose codes become column names
     */
    private static void recreateClickhouseTable(String tableName, List<TagInfo> tagInfoList) {
        // Locale.ROOT keeps column names stable regardless of the JVM's
        // default locale (avoids the Turkish dotless-i surprise).
        List<String> fieldList = tagInfoList.stream()
                .map(tagInfo -> tagInfo.getTagCode().toLowerCase(Locale.ROOT) + " String")
                .collect(Collectors.toList());
        String fieldListSQL = String.join(",", fieldList);

        // primary key is omitted: ClickHouse defaults it to the ORDER BY key.
        String createTableSQL = "  create table   " + tableName + "\n" +
                "        ( uid String,  " + fieldListSQL + " )\n" +
                "           engine= MergeTree     \n" +
                "           order by  uid";

        String dropTableSQL = "drop table if exists " + tableName;

        System.out.println(dropTableSQL);
        MyClickhouseUtil.executeSql(dropTableSQL);
        System.out.println(createTableSQL);
        MyClickhouseUtil.executeSql(createTableSQL);
    }
}
