package com.swsc.ai.tasks;

import com.swsc.ai.config.SparkSqlConf;
import com.swsc.ai.constant.EnvConstant;
import com.swsc.ai.util.SparkUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.sql.SQLException;

/**
 * Customer fund trading behavior features.
 *
 * <p>Pulls the day's fund trade records, resolves internal fund codes via the
 * {@code mf_archives} table, encodes each trade as a colon-delimited behavior
 * string, merges with previously accumulated history (capped per fund), and
 * writes both the current-day and merged-history results to HDFS as CSV.
 *
 * @author DuanCXin
 * @since 2023-10-25
 */
public class FundTransBehaviorTask extends TemplateTask {

    /** Input path is not derived from the data source for this task. */
    @Override
    public String genInputPath(String dataSource) {
        return null;
    }

    /** Columns read from the trade table. */
    @Override
    protected String genSelectColumns() {
        return "cust_no, trade_date, prod_code, business_amount, business_price";
    }

    /**
     * Builds the single-date filter for the trade table.
     *
     * <p>NOTE(review): {@code taskDate} is concatenated unquoted — this assumes a
     * numeric, trusted date literal (e.g. yyyyMMdd); confirm upstream callers.
     */
    @Override
    public String genWhereCondition(String taskDate) {
        return "WHERE trade_date = " + taskDate;
    }

    /**
     * Executes the feature pipeline and writes the results.
     *
     * @param session     active Spark session
     * @param outputPath  base HDFS output directory for the CSV results
     * @param inputParams [0] = table/view name supplying the "buy" records,
     *                    [1] = task date used in the trade_date filter
     *                    (plus whatever {@code SparkUtil.generateHistoryData} reads)
     * @throws SQLException propagated from the data-access helpers
     */
    @Override
    public void processData(SparkSession session, String outputPath, String... inputParams) throws SQLException {
        String tableName = "t_prod_sell_info";
        // Reuse the overridden builders rather than duplicating their literals.
        String selectColumns = genSelectColumns();
        String condition = genWhereCondition(inputParams[1]);
        Dataset<Row> df = SparkSqlConf.getDataByPartition(session, tableName, "", selectColumns, condition);
        df.createOrReplaceTempView(tableName);
        Dataset<Row> mf_archives_df = SparkSqlConf.getDataBySql(session, "mf_archives", "1 = 1", "innercode", "secucode");
        mf_archives_df.createOrReplaceTempView("mf_archives");

        // Buy records (type=1) UNION sell records (type=2), each joined to the
        // archive table to map prod_code -> innercode, then collapsed into one
        // tab-separated behavior string per customer.
        Dataset<Row> sqlDF = session.sql("select res.cust_no, concat_ws('\t',collect_set(res.behive)) as behives from (\n" +
                "  select sc.cust_no, CONCAT(sc.innercode, ':', sc.type, ':', sc.business_amount, ':', sc.business_price, ':', sc.trade_date) as behive\n" +
                "      from (\n" +
                "       select cust_no, trade_date, prod_code, business_amount, business_price,1 as type, ma.innercode from " + inputParams[0] + "\n" +
                "           as buy\n" +
                "           inner join (\n" +
                "           select any_value(innercode) as innercode, secucode from mf_archives group by secucode\n" +
                "       ) as ma\n" +
                "          on ma.secucode = buy.prod_code\n" +
                "       UNION\n" +
                "       select cust_no, trade_date, prod_code, business_amount, business_price,2 as type, ma.innercode from t_prod_sell_info\n" +
                "           as buy\n" +
                "           inner join (\n" +
                "           select any_value(innercode) as innercode, secucode from mf_archives group by secucode\n" +
                "       ) as ma\n" +
                "      on ma.secucode = buy.prod_code\n" +
                "   ) as sc\n" +
                ") as res group by cust_no");

        StructType schema = new StructType(new StructField[]{
                new StructField("cust_no", DataTypes.StringType, false, Metadata.empty()),
                new StructField("behives", DataTypes.StringType, false, Metadata.empty())
        });
        String structType = "cust_no STRING, behives STRING";
        Dataset<Row> historyData = SparkUtil.generateHistoryData(session, schema, structType, inputParams);
        // Append today's rows to the accumulated history, then re-collapse to
        // one tab-joined behavior string per customer.
        Dataset<Row> rowDataset = historyData.unionAll(sqlDF);
        Dataset<Row> resultTempData = rowDataset.groupBy("cust_no").agg(functions.concat_ws("\t", functions.collect_list("behives")).as("behives"));
        resultTempData.select("cust_no","behives").createOrReplaceTempView("tempFundView");
        // Explode the tab-separated behavior list back into one row per action.
        // (A size(split(...)) column in the original query was never consumed
        // downstream and has been dropped.)
        Dataset<Row> sql = session.sql("select cust_no, split(behives, '\t') AS actionList from tempFundView");
        Dataset<Row> flatDataset = sql.select(functions.explode(new Column("actionList")), new Column("cust_no"))
                .selectExpr("col AS action", "cust_no");
        flatDataset.createOrReplaceTempView("fund_temp");
        // Split each behavior string back into columns and keep at most 5000
        // most-recent rows per innercode.
        // NOTE(review): the window partitions by innercode only, not by
        // (cust_no, innercode) — confirm the 5000 cap is intentionally global
        // per fund rather than per customer.
        Dataset<Row> splitFundView = session.sql("select cust_no, innercode, type, business_amount, business_price, trade_date from\n" +
                "(select cust_no,\n" +
                "       split(action,':')[0] as innercode,\n" +
                "       split(action,':')[1] as type,\n" +
                "       split(action,':')[2] as business_amount,\n" +
                "       split(action,':')[3] as business_price,\n" +
                "       split(action,':')[4] as trade_date,\n" +
                "       ROW_NUMBER() OVER(PARTITION BY split(action,':')[0] ORDER BY split(action,':')[4] DESC) AS rn\n" +
                "from fund_temp)\n" +
                "where rn <= 5000");
        splitFundView.createOrReplaceTempView("splitFundView");
        Dataset<Row> groupFundView = session.sql("select cust_no, CONCAT(innercode, ':', type, ':', business_amount, ':', business_price, ':', trade_date) as behive, trade_date from splitFundView");
        groupFundView.createOrReplaceTempView("groupFundView");
        // Re-assemble per-customer behavior strings, sorted by trade_date ascending.
        Dataset<Row> resultData = session.sql("SELECT cust_no, concat_ws('\t',sort_array(collect_list(struct(trade_date, behive)),true).behive) as behives FROM groupFundView GROUP BY cust_no");

        // Output the latest results computed for the current day.
        // FIX: the original used option("mode", "overwrite"), which DataFrameWriter
        // ignores — the save mode must be set via mode(...). Without it, a re-run
        // against an existing output path fails with the default ErrorIfExists mode.
        sqlDF.coalesce(1).write()
                .option("header", "true")
                .mode("overwrite")
                .csv(outputPath + EnvConstant.CURRENT_DATA_PATH);
        // Write the merged history file to HDFS.
        resultData.coalesce(1).write()
                .option("header", "true")
                .mode("overwrite")
                .csv(outputPath + EnvConstant.HISTORY_DATA_PATH);
    }
}
