package com.swsc.ai.tasks;

import com.swsc.ai.config.SparkSqlConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.sql.SQLException;

/**
 * @describe: 产品购买可能性预测
 * @author: DuanCXin
 * @created: 2023-11-13 15:14
 */
public class ProdPurchPredTask extends RecallTemplateTask {

    /**
     * Registers the temp views the sample-building SQL depends on:
     * {@code prodBuyCust} (purchase records, excluding prod_code 'S00001') and
     * {@code portraitData} (customer portrait wide table loaded from HDFS).
     *
     * @param session active Spark session
     * @param str     task arguments; {@code str[1]} is the HDFS path of the portrait wide table
     * @throws SQLException propagated from the underlying SQL fetch
     */
    @Override
    public void createView(SparkSession session, String... str) throws SQLException {
        long startTime = System.currentTimeMillis();
        String sql = "SELECT cust_no, prod_code, prodcode_type, prod_type, business_amount, business_price, business_balance FROM t_prod_buy_info WHERE prod_code != 'S00001'";
        Dataset<Row> prodBuyCust = SparkSqlConf.getDataByCompleteSQL(session, sql);
        prodBuyCust.createOrReplaceTempView("prodBuyCust");
        // Register the customer wide-table view.
        String portraitPath = str[1];
        Dataset<Row> portraitDF = createRealView(session, portraitPath);
        portraitDF.createOrReplaceTempView("portraitData");
        long totalTime = System.currentTimeMillis() - startTime;
        System.out.println("创建独立视图成功！运行时间： " + totalTime + " 毫秒");
    }

    /**
     * Loads the customer portrait wide table (headered CSV) from HDFS.
     *
     * @param session  active Spark session
     * @param hdfsPath HDFS path holding the wide-table CSV
     * @return the loaded dataset, never {@code null}
     * @throws IllegalStateException if the path cannot be read. The previous
     *         version returned {@code null} here, which surfaced later as an
     *         opaque NPE at {@code createOrReplaceTempView}; failing fast with
     *         the cause attached makes the error actionable.
     */
    private Dataset<Row> createRealView(SparkSession session, String hdfsPath) {
        try {
            return session.read().format("csv").option("header", "true").load(hdfsPath);
        } catch (Exception e) {
            System.err.println("未找到有效的宽表数据，查找路径为：" + hdfsPath);
            throw new IllegalStateException("No valid wide-table data found at: " + hdfsPath, e);
        }
    }

    /**
     * Dispatches to the CTR-estimation sample builder selected by the task type.
     *
     * @param session   active Spark session
     * @param condition condition[0] = output base path; condition[1] = model type
     *                  ("1" = equity index funds, "2" = fixed-income products)
     */
    @Override
    public void calculateEstimationResult(SparkSession session, String... condition) {
        // The two model types are mutually exclusive, so chain the checks.
        if (condition[1].equals("1")) {
            ctrEstimationFundIndex(session, condition[0], "/fund_index");
        } else if (condition[1].equals("2")) {
            ctrEstimationOtherProd(session, condition[0], "/other");
        }
    }

    /**
     * CTR estimation model — equity index funds.
     *
     * <p>Builds positive samples (label 1: customers who bought equity/index-type
     * funds, joined to their portrait features) and negative samples (label 0:
     * active individual customers with no purchase records), then writes the
     * union to HDFS as one CSV file.
     *
     * @param session   active Spark session
     * @param condition condition[0] = output base path; condition[1] = sub-path suffix
     */
    public void ctrEstimationFundIndex(SparkSession session, String... condition) {
        // Positive samples for equity index funds.
        Dataset<Row> fundIndexPositiveSample = session.sql("select 1 as label, d.* from (\n" +
                "   select cust_no, prod_code, business_price, business_amount from prodBuyCust\n" +
                "   WHERE prodcode_type in ('股票型基金', '股债平衡型基金' ,'混合基金' ,'指数型基金')\n" +
                ") as cp\n" +
                "left join (\n" +
                "    select pd.cust_no, any_value(pd.prod_code) as prod_code, COUNT(*) AS purchase_count, SUM(buy_amount) AS total_amount from (\n" +
                "        select cust_no, prod_code, business_price * business_amount as buy_amount\n" +
                "        from prodBuyCust\n" +
                "    ) as pd\n" +
                "    group by pd.cust_no\n" +
                "    HAVING total_amount >= 5000 AND purchase_count >= 2\n" +
                ") as p\n" +
                "on cp.cust_no = p.cust_no\n" +
                "left join (\n" +
                "    select * from portraitData where normal_status != '销户' and cust_type = '个人'\n" +
                ") as d\n" +
                "on cp.cust_no = d.client_id");
        // Negative samples for equity index funds.
        Dataset<Row> fundIndexNegativeSample = session.sql("select 0 as label, u.* from (\n" +
                "     select d.* from \n" +
                "     (select * from portraitData where normal_status != '销户' and cust_type = '个人' and no_trd_days <= 365) as d\n" +
                "     left join (\n" +
                "       select cust_no, prod_code from prodBuyCust \n" +
                "     ) as p\n" +
                "     on p.cust no = d.client_id\n" +
                "     where p.cust_no is null\n" +
                ") as u\n"
        );
        // union replaces the deprecated unionAll (same semantics since Spark 2.0).
        Dataset<Row> sqlDF = fundIndexPositiveSample.union(fundIndexNegativeSample);
        writeSamples(sqlDF, condition[0] + condition[1]);
    }

    /**
     * CTR estimation model — fixed-income products.
     *
     * <p>Builds positive samples (label 1: customers who bought capital-protected,
     * bond, money-market or repo products, joined to their portrait features) and
     * negative samples (label 0: active individual customers with no purchase
     * records), then writes the union to HDFS as one CSV file.
     *
     * @param session   active Spark session
     * @param condition condition[0] = output base path; condition[1] = sub-path suffix
     */
    private void ctrEstimationOtherProd(SparkSession session, String... condition) {
        // Positive samples for fixed-income products.
        Dataset<Row> otherProdPositiveSample = session.sql("select 1 as label, d.* from (\n" +
                "    select cust_no, prod_code, business_price, business_amount from prodBuyCust\n" +
                "    WHERE prodcode_type in ('保本型基金', '债券型基金' ,'券商理财' , '收益凭证', '货币型基金') or prod_type = '国债回购'\n" +
                ") as cp\n" +
                "left join (\n" +
                "    select pd.cust_no, any_value(pd.prod_code) as prod_code, COUNT(*) AS purchase_count, SUM(business_price) AS total_amount from (\n" +
                "        select cust_no, prod_code, business_price\n" +
                "        from prodBuyCust\n" +
                "    ) as pd\n" +
                "    group by pd.cust_no\n" +
                "    HAVING total_amount >= 5000 AND purchase_count >= 2\n" +
                ") as p\n" +
                "on cp.cust_no = p.cust_no\n" +
                "left join (\n" +
                "select * from portraitData where normal_status != '销户' and cust_type = '个人'\n" +
                ") as d\n" +
                "on cp.cust_no = d.client_id"
        );
        otherProdPositiveSample.createOrReplaceTempView("otherProdTable");
        // Negative samples for fixed-income products.
        Dataset<Row> otherProdNegativeSample = session.sql("select 0 as label, u.* from (\n" +
                "    select d.* from (\n" +
                "      select * from portraitData where normal_status != '销户' and cust_type = '个人' and no_trd_days <= 365) as d\n" +
                "    left join (\n" +
                "      select cust_no, prod_code from prodBuyCust \n" +
                "    ) as p\n" +
                "    on p.cust_no = d.client_id\n" +
                "    where p.cust_no is null\n" +
                ") as u\n"
        );
        // union replaces the deprecated unionAll (same semantics since Spark 2.0).
        Dataset<Row> sqlDF = otherProdPositiveSample.union(otherProdNegativeSample);
        writeSamples(sqlDF, condition[0] + condition[1]);
    }

    /**
     * Writes the labelled sample set to HDFS as a single headered CSV file.
     *
     * <p>The save mode must be set with {@code DataFrameWriter.mode("overwrite")};
     * the previous {@code option("mode", "overwrite")} call was silently ignored
     * (save mode is not a CSV data-source option), leaving the default
     * ErrorIfExists mode, so any rerun failed with "path already exists".
     *
     * @param sqlDF    labelled samples to persist
     * @param fileName target HDFS path
     */
    private void writeSamples(Dataset<Row> sqlDF, String fileName) {
        sqlDF.coalesce(1).write()
                .format("csv")
                .option("header", "true")
                .mode("overwrite")
                .save(fileName);
    }
}
