package com.atguigu.upp.apps;

import com.atguigu.upp.mappers.ClickHouseMapper;
import com.atguigu.upp.utils.PropertiesUtil;
import com.atguigu.upp.utils.SqlTaskExecuteUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Created by Smexy on 2022/9/13
 *
 *  Queries the per-date wide (tag) table from Hive and exports its rows
 *  into a matching table in ClickHouse.
 */
public class ExportToCKApp
{
    /**
     * Entry point. Reads the wide tag table for the given date from Hive and
     * exports it into ClickHouse.
     *
     * @param args args[0] = task id (numeric; currently parsed but unused),
     *             args[1] = processing date, e.g. {@code 2022-09-13}
     * @throws IOException if the MyBatis ClickHouse config cannot be read
     */
    public static void main(String[] args) throws IOException {

        Integer taskId = Integer.parseInt(args[0]); // validates args[0] is numeric; not otherwise used here
        String do_date = args[1];

        // SparkSession used to read the Hive wide table
        SparkSession sparkSession = SqlTaskExecuteUtil.createSparkSession("ExportToCKApp");

        // Names of the tag tables whose columns make up today's wide table
        List<String> tableNames = SqlTaskExecuteUtil.queryTagTableNameToMerge(sparkSession);

        SqlSessionFactory sqlSessionFactory = SqlTaskExecuteUtil.createSqlSessionFactory("clickhouse-config.xml");

        // FIX: the original never closed the SqlSession (connection leak) and
        // never stopped the SparkSession. Close both deterministically.
        try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
            ClickHouseMapper clickHouseMapper = sqlSession.getMapper(ClickHouseMapper.class);

            // Query Hive, export to ClickHouse
            exportToCK(do_date, sparkSession, tableNames, clickHouseMapper);
        } finally {
            sparkSession.stop();
        }
    }

    /**
     * Drops and recreates the ClickHouse wide table for {@code do_date}
     * (drop-then-create keeps the job idempotent on re-runs), then reads the
     * Hive wide table of the same name and appends its rows into ClickHouse
     * via JDBC.
     *
     * @param do_date          processing date ({@code yyyy-MM-dd}); dashes become
     *                         underscores in the table name
     * @param sparkSession     session used to query Hive
     * @param tableNames       tag column names to declare in the CK table DDL
     * @param clickHouseMapper MyBatis mapper that runs the CK DDL statements
     */
    public static void exportToCK(String do_date,SparkSession sparkSession,List<String> tableNames,ClickHouseMapper clickHouseMapper){

        // Resolve the Hive database and the wide-table name for this date,
        // e.g. prefix + "2022_09_13"
        String updbname = PropertiesUtil.getValue("updbname");
        String upwideprefix = PropertiesUtil.getValue("upwideprefix");
        String tableName = upwideprefix + do_date.replace('-', '_');

        // Drop first, then create: guarantees idempotent re-runs
        clickHouseMapper.deleteTable(tableName);

        /*
            Build the column list for today's tags, e.g.
              tag_consume_behaivor_order_amount_7d String,
              tag_person_nature_gender             String
         */
        List<String> tags = tableNames.stream().map(tag -> tag + " String ").collect(Collectors.toList());
        String tagSql = StringUtils.join(tags, ',');

        // Create the CK table via JDBC (MyBatis)
        clickHouseMapper.createCKWideTable(tableName, tagSql);

        // Read the whole Hive wide table for this date
        String hiveQuerySql = "select * from %s.%s";
        Dataset<Row> result = sparkSession.sql(String.format(hiveQuerySql, updbname, tableName));

        // The CK table already exists, so Append (Spark must not create it).
        result.write()
            .mode(SaveMode.Append)
            .option("driver", PropertiesUtil.getValue("ck.jdbc.driver.name"))
            .option("batchsize", 500)
            .option("isolationLevel", "NONE")  // ClickHouse is OLAP with no transactions
            .option("numPartitions", "4")      // write concurrency
            .jdbc(PropertiesUtil.getValue("ck.jdbc.url"), tableName, new Properties());
    }
}
