package com.atguigu.userprofile.app;

import com.atguigu.userprofile.common.bean.TagInfo;
import com.atguigu.userprofile.common.dao.TagInfoDAO;
import com.atguigu.userprofile.common.util.MyClickhouseUtil;
import com.atguigu.userprofile.common.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskExportCH {

    // Exports the per-day user tag wide table into ClickHouse:
    //   0. load the enabled tag definitions (they drive the column list)
    //   1. drop-and-recreate the dated ClickHouse table (uid + one String column per tag)
    //   2. read the Hive wide table into a Dataset
    //   3. append the Dataset into ClickHouse over JDBC
    public static void main(String[] args) {
        // NOTE(review): the business date is taken from args[1], so the launcher must
        // pass it as the SECOND program argument — confirm against the submit script;
        // if the date is the only argument, this should read args[0] instead.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "usage: TaskExportCH <unused> <busiDate yyyy-MM-dd>; got " + args.length + " arg(s)");
        }
        String busiDate = args[1];

        // Spark session with Hive support; master is supplied by spark-submit.
        SparkConf sparkConf = new SparkConf().setAppName("task_export_ch_app");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            // 0. enabled tags -> one ClickHouse column per tag code
            List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoListWithOn();

            // 1. build the DDL:
            //    create table $db.user_tag_merge_yyyymmdd (uid String, <tag cols> String)
            //    engine=MergeTree order by uid   (primary key defaults to the order-by key)
            String tableName = "user_tag_merge_" + busiDate.replace("-", "");

            List<String> fieldList = tagInfoList.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " String ")
                    .collect(Collectors.toList());
            String fieldNames = StringUtils.join(fieldList, ",");

            Properties properties = MyPropertiesUtil.load("config.properties");
            String upName = properties.getProperty("user-profile.dbname");

            // Drop first so a rerun of the same date starts from an empty table.
            String dropTableSQL = "drop table if exists " + upName + "." + tableName;
            System.out.println(dropTableSQL);

            String createTableSQL = "   create table  " + upName + "." + tableName +
                    "        (uid String ,  " + fieldNames + " )\n" +
                    "         engine=MergeTree\n" +
                    "        order by  uid";
            System.out.println(createTableSQL);

            MyClickhouseUtil.executeSql(dropTableSQL);
            MyClickhouseUtil.executeSql(createTableSQL);

            // 2. read the Hive wide table for this date.
            // NOTE(review): this selects from the same db.table name that was just
            // created in ClickHouse — verify the Hive wide table really carries the
            // identical dated name and column set, otherwise this reads the wrong
            // (freshly emptied) table.
            Dataset<Row> dataset = sparkSession.sql("select * from " + upName + "." + tableName);

            // 3. append into ClickHouse over JDBC. The table was created above, so
            // SaveMode.Append only inserts rows and Spark never derives DDL itself.
            String clickhouseURL = properties.getProperty("clickhouse.url");
            dataset.write().mode(SaveMode.Append)
                    .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                    .option("batchsize", 1000)        // rows per JDBC batch insert
                    .option("isolationLevel", "NONE") // ClickHouse has no transactions
                    .option("numPartitions", "4")     // parallel JDBC connections
                    .jdbc(clickhouseURL, tableName, new Properties());
        } finally {
            // Release the Spark context even if the export fails part-way.
            sparkSession.stop();
        }
    }
}
