package com.atguigu.upp.app;

import com.atguigu.upp.bean.TagInfo;
import com.atguigu.upp.bean.TaskInfo;
import com.atguigu.upp.bean.TaskTagRule;
import com.atguigu.upp.service.ClickhouseService;
import com.atguigu.upp.service.MysqlDBService;
import com.atguigu.upp.service.UPPUtil;
import jodd.util.PropertiesUtil;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Created by Smexy on 2023/1/4
 *
 *  Workflow:
 *          1) Query which tags were computed today.
 *                  In phase one, each task computes exactly one tag.
 *                  The list of today's tasks is read from the MySQL metadata store.
 *          2) Build a pivot SQL statement and query the wide table from Hive
 *                  (executed via Spark SQL).
 *          3) Create the target table in ClickHouse
 *                  (plain JDBC through the ClickHouse service).
 *          4) Write the Hive query result into ClickHouse:
 *                  read Hive ----> DataFrame ----> write to CK.
 *
 */
public class MergeWideTableApp
{
    /**
     * Entry point.
     *
     * @param args args[0] = taskId (currently unused, kept for CLI compatibility),
     *             args[1] = doDate, the business date to process, formatted yyyy-MM-dd
     * @throws IOException if reading a MyBatis config file fails
     * @throws IllegalArgumentException if fewer than two arguments are supplied
     */
    public static void main(String[] args) throws IOException {

        // Fail fast with a usable message instead of an opaque
        // ArrayIndexOutOfBoundsException when launched without parameters.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Usage: MergeWideTableApp <taskId> <doDate (yyyy-MM-dd)>");
        }
        String taskId = args[0];
        String doDate = args[1];

        // Step 1: query which tags were computed today (task metadata lives in MySQL).
        SqlSessionFactory mysqlSSF = UPPUtil.getSqlSessionFactoryByConfig("mysql_config.xml");
        SqlSession mysqlSession = mysqlSSF.openSession();
        SqlSession ckSession = null;
        SparkSession sparkSession = null;
        try {
            MysqlDBService mysqlDBService = new MysqlDBService(mysqlSession);
            List<TagInfo> tags = mysqlDBService.getTagInfoTodayExecute();

            // Step 2: build the pivot SQL that assembles the wide table from Hive.
            String pivotSql = generatePivotSql(tags, doDate);

            // Steps 3 and 4: create the ClickHouse table, then read from Hive and write to CK.
            SqlSessionFactory ckSSF = UPPUtil.getSqlSessionFactoryByConfig("ck_config.xml");
            ckSession = ckSSF.openSession();
            ClickhouseService clickhouseService = new ClickhouseService(ckSession);
            sparkSession = UPPUtil.getSparkSession("MergeWideTableApp");

            writeWideTableToCk(tags, clickhouseService, sparkSession, pivotSql, doDate);
        } finally {
            // Release JDBC-backed sessions and the Spark context even when a step
            // fails; the original code leaked both MyBatis sessions and never
            // stopped the SparkSession.
            mysqlSession.close();
            if (ckSession != null) {
                ckSession.close();
            }
            if (sparkSession != null) {
                sparkSession.stop();
            }
        }
    }

    /**
     * Creates (or re-creates) the day's wide table in ClickHouse and bulk-loads
     * the pivoted Hive result into it over JDBC.
     *
     * @param tags               tags computed today; one ClickHouse column per tag
     * @param clickhouseService  DDL helper for the ClickHouse side
     * @param sparkSession       session used to run the pivot query against Hive
     * @param pivotSql           the SQL produced by {@link #generatePivotSql}
     * @param doDate             business date (yyyy-MM-dd), used in the table name
     */
    private static void writeWideTableToCk(List<TagInfo> tags, ClickhouseService clickhouseService, SparkSession sparkSession, String pivotSql, String doDate) {

        // One table per day: fixed prefix + date, with '-' replaced by '_'
        // because '-' is not valid in an unquoted table identifier.
        String upwideprefix = UPPUtil.getPropertyValue("upwideprefix");
        String table = upwideprefix + doDate.replace("-", "_");

        // Idempotency: if today's run already produced a wide table, drop it
        // before re-creating so a re-run does not duplicate rows.
        clickhouseService.dropWideTable(table);

        // Build the column DDL fragment: one String column per tag code.
        String column = tags.stream()
                            .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + "  String ")
                            .collect(Collectors.joining(","));
        clickhouseService.createWideTable(table, column);

        // Run the pivot query against Hive.
        Dataset<Row> data = sparkSession.sql(pivotSql);

        // Write the result into ClickHouse over JDBC.
        Properties properties = new Properties();
        data.write()
            // Default is ErrorIfExists; the table was just created above, so append.
            .mode(SaveMode.Append)
            .option("driver", UPPUtil.getPropertyValue("ck.jdbc.driver.name"))
            .option("batchsize", 500)
            // ClickHouse is an OLAP engine without transactions, so disable isolation.
            .option("isolationLevel", "NONE")
            .option("numPartitions", "4") // write concurrency
            .jdbc(UPPUtil.getPropertyValue("ck.jdbc.url"), table, properties);

    }


    /*
     Example of the generated SQL:

select
   *
from  (
       select uid,tagValue,'tag_consume_behavior_order_7damount' tagCode from upp220828.tag_consume_behavior_order_7damount where dt = '2020-06-14'
       union all
       select uid,tagValue,'tag_population_attributes_nature_gender' tagCode from upp220828.tag_population_attributes_nature_gender where dt = '2020-06-14'
       union all
       select uid,tagValue, 'tag_population_attributes_nature_period' tagCode from upp220828.tag_population_attributes_nature_period where dt = '2020-06-14'
    )tmp
pivot(
  max(tagValue)
  for tagCode in ( 'tag_consume_behavior_order_7damount', 'tag_population_attributes_nature_gender','tag_population_attributes_nature_period')
);

     */
    /**
     * Builds the pivot SQL that turns the per-tag Hive tables (one row per
     * uid/tag) into a single wide table with one column per tag.
     *
     * @param tags   tags computed today; each maps to a Hive table of the same
     *               (lower-cased) name and to one pivoted output column
     * @param doDate partition date (yyyy-MM-dd) selected from each source table
     * @return the complete SELECT ... PIVOT statement
     */
    private static String generatePivotSql(List<TagInfo> tags, String doDate) {

        String template = " select * from ( %s ) tmp pivot( max(tagValue) for tagCode in  ( %s )   ) ";

        // One SELECT per tag; the tag code doubles as both a literal column and
        // the source table name. The UNION ALL of these feeds the pivot.
        String unionSql = " select uid,tagValue,'%s' tagCode from %s.%s where dt = '%s' ";

        // Hive database holding the per-tag tables.
        String db = UPPUtil.getPropertyValue("updbname");

        // Assemble the inner UNION ALL sub-query over all of today's tags.
        String tmpSql = tags.stream()
                            .map(tagInfo -> String.format(unionSql, tagInfo.getTagCode().toLowerCase(), db, tagInfo.getTagCode().toLowerCase(), doDate))
                            .collect(Collectors.joining(" union all "));

        // Comma-separated quoted tag codes for the PIVOT ... IN (...) clause.
        String inSql = tags.stream()
                           .map(tagInfo -> "'" + tagInfo.getTagCode().toLowerCase() + "'")
                           .collect(Collectors.joining(","));

        String pivotSql = String.format(template, tmpSql, inSql);

        // Echo the generated SQL for debugging/log inspection.
        System.out.println(pivotSql);

        return pivotSql;

    }

}
