package com.atguigu.userprofile.app;

import com.atguigu.userprofile.common.bean.TagInfo;
import com.atguigu.userprofile.common.dao.TagInfoDAO;
import com.atguigu.userprofile.common.util.MyClickhouseUtil;
import com.atguigu.userprofile.common.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskExportCk {

    // Daily export pipeline:
    //   1. Load the enabled tag list from MySQL.
    //   2. (Re)create a ClickHouse table named after the Hive wide table — one table per day.
    //   3. Read the Hive wide table into a Spark Dataset.
    //   4. Append the Dataset into ClickHouse over JDBC.
    public static void main(String[] args) {

        // args[0] = task id (currently unused by this job; kept for scheduler compatibility)
        // args[1] = business date, expected format "yyyy-MM-dd"
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Usage: TaskExportCk <taskId> <busiDate(yyyy-MM-dd)>");
        }
        String taskId = args[0];
        String busiDate = args[1];

        SparkConf sparkConf = new SparkConf().setAppName("task_export_ck_app");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            // 1. Enabled tags determine the column list of the target table.
            List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoListWithOn();

            // 2. Drop + create keeps this batch job idempotent: re-running the same
            //    business date always starts from an empty table.
            //    Table name mirrors the Hive wide table: up_tag_merge_yyyyMMdd.
            String tableName = "up_tag_merge_" + busiDate.replace("-", "");

            // Every tag becomes a lower-cased String column alongside the uid key.
            List<String> fieldList = tagInfoList.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " String")
                    .collect(Collectors.toList());
            String fieldSQL = StringUtils.join(fieldList, ",");

            String dropTableSQL = " drop  table if exists " + tableName;
            System.out.println(dropTableSQL);
            MyClickhouseUtil.executeSql(dropTableSQL);

            // MergeTree engine, no partitioning, ordered by uid.
            String createTableSQL = "    create table  " + tableName +
                    "          (uid String ,  " + fieldSQL + " )\n" +
                    "        engine=MergeTree\n" +
                    "      order by uid\n ";
            System.out.println(createTableSQL);
            MyClickhouseUtil.executeSql(createTableSQL);

            // 3. Read the Hive wide table as a distributed Dataset — a plain Java
            //    list would not scale to the full user base.
            Properties properties = MyPropertiesUtil.load("config.properties");
            String upDbName = properties.getProperty("user-profile.dbname");

            Dataset<Row> dataset = sparkSession.sql("select * from " + upDbName + "." + tableName);

            // 4. Bulk-append into ClickHouse via JDBC.
            String clickhouseURl = properties.getProperty("clickhouse.url");
            dataset.write().mode(SaveMode.Append)
                    .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                    .option("batchsize", 500)
                    .option("isolationLevel", "NONE")  // ClickHouse has no transactions; disable them
                    .option("numPartitions", "4")      // concurrent JDBC writers
                    .jdbc(clickhouseURl, tableName, new Properties());
        } finally {
            // Release Spark resources even if the export fails part-way.
            sparkSession.stop();
        }
    }

}
