package com.atguigu.userprofile.task;

import com.atguigu.userprofile.MysqlUtil.MyClickhouseUtil;
import com.atguigu.userprofile.MysqlUtil.MyPropsUtil;
import com.atguigu.userprofile.bean.Taginfo;
import com.atguigu.userprofile.constant.ConstCode;
import com.atguigu.userprofile.dao.TaginfoDao;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Spark batch job that exports the per-date merged tag wide table from the
 * user-profile Hive database into ClickHouse.
 *
 * <p>Flow: read enabled tag definitions, build a dynamic DDL for a ClickHouse
 * MergeTree table named {@code up_merge_<yyyyMMdd>}, recreate that table
 * (drop-then-create so reruns for the same date are idempotent), then read the
 * same-named Hive table and append its rows into ClickHouse over JDBC.
 *
 * <p>Args: {@code args[0]} = taskId (accepted for a uniform task interface;
 * not used in this job), {@code args[1]} = business date, e.g. "2024-01-31".
 */
public class TaskExport {
    public static void main(String[] args) {
        // 1. External arguments passed by the scheduler.
        String taskId = args[0];   // unused here; kept so all tasks share the same CLI contract
        String busiDate = args[1];

        // 2. Determine the wide table's columns: one per enabled tag.
        List<Taginfo> taginfos = TaginfoDao.selectTagInfosByTaskWithStatusEnable();

        // 3. Build the dynamic DDL:
        //    create table if not exists [ckDbName].[tableName] (
        //        uid String,
        //        [tagColumns...]
        //    ) engine = MergeTree order by uid
        String upName = MyPropsUtil.get(ConstCode.UP_DBNAME);
        String ckName = MyPropsUtil.get(ConstCode.CK_DBNAME);
        String ckUrl = MyPropsUtil.get(ConstCode.CK_URL);

        // Every tag column is stored as a ClickHouse String; column names are
        // lower-cased tag codes to match the Hive side.
        String tagColumns = taginfos.stream()
                .map(taginfo -> taginfo.getTagCode().toLowerCase() + " String")
                .collect(Collectors.joining(" , "));
        // One table per business date, e.g. up_merge_20240131.
        String tableName = "up_merge_" + busiDate.replace("-", "");
        String createTable = " create table if not exists " + ckName + "." + tableName +
                " ( " +
                " uid String ," + tagColumns +
                " )" +
                " engine = MergeTree " +
                " order by uid ";
        System.out.println("createTable :" + createTable);
        String dropTable = "drop table if exists " + ckName + "." + tableName;
        System.out.println("dropTable:" + dropTable);

        // Recreate the target table: drop first, THEN create.
        // BUG FIX: the original executed createTable before dropTable, which
        // immediately dropped the freshly created MergeTree table and left
        // Spark's JDBC writer to auto-create it with ENGINE=Log() instead.
        MyClickhouseUtil.executesql(dropTable);
        MyClickhouseUtil.executesql(createTable);

        // 4. Read uid + all tag columns from the same-named Hive wide table.
        String selectColumns = taginfos.stream()
                .map(taginfo -> taginfo.getTagCode().toLowerCase())
                .collect(Collectors.joining(" , "));
        String selectsql = "select uid ," + selectColumns + " from " + upName + "." + tableName;

        SparkConf sparkConf = new SparkConf().setAppName("task_export_app");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();
        Dataset<Row> dataset = sparkSession.sql(selectsql);

        // 5. Append into the pre-created ClickHouse table over JDBC.
        //    createTableOptions only applies if Spark has to create the table
        //    itself (i.e. the DDL above failed); normal path appends into the
        //    MergeTree table created in step 3.
        dataset.write()
                .mode(SaveMode.Append)
                .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                .option("numPartitions", 4)
                .option("batchsize", 1000)
                .option("isolationLevel", "NONE") // ClickHouse JDBC does not support transactions
                .option("createTableOptions", "ENGINE=Log()")
                .jdbc(ckUrl, tableName, new Properties());
    }
}
