package com.swsc.ai.tasks;

import com.swsc.ai.config.SparkSqlConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;
import scala.collection.JavaConverters;
import scala.collection.Seq;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.IntStream;

/**
 * Builds the training sample set for a GBDT+LR product-recommendation model:
 * positive samples are real customer/product purchase pairs joined with fund
 * and customer-portrait features; negative samples are random pairings of
 * non-buying customers with products. The combined set is written out as CSV.
 *
 * @author 容若
 * @since 2023-12-04
 */
public class GBDTAndLRTask extends ProdEstimationTempTask {

    /**
     * Replication factor for product rows when generating negative samples:
     * the product pool is inflated by this factor before being randomly
     * paired with non-buying customers.
     */
    private static final int NEGATIVE_REPLICATION_FACTOR = 30;

    /**
     * Registers the temporary views consumed by
     * {@link #calculateEstimationResult(SparkSession, String)}:
     * <ul>
     *   <li>{@code prodBuyInfo} — qualifying purchases (positive-sample base)</li>
     *   <li>{@code prodBuyCustFull} — every customer that ever bought anything</li>
     *   <li>{@code fundData} — product (fund) feature wide table from CSV</li>
     *   <li>{@code portrait} — customer portrait wide table from CSV</li>
     * </ul>
     *
     * @param session      active Spark session
     * @param portraitPath HDFS path of the customer-portrait CSV wide table
     * @param prodPath     HDFS path of the product-feature CSV wide table
     */
    @Override
    public void createView(SparkSession session, String portraitPath, String prodPath) {
        // Purchases that count as positive samples: a fixed list of product
        // codes is excluded and a minimum balance of 3000 is required.
        // NOTE(review): the excluded codes are presumably cash-management /
        // non-target products — confirm the list with the data owner.
        String sql = "SELECT cust_no, prod_code FROM t_prod_buy_info WHERE prod_code != 'BF0002' and prod_code != '970161' and prod_code != 'S00001' and prod_code != 'D01007' and prod_code != 'D00007' and prod_code !='D00014' and prod_code != 'D02007' and prod_code != 'F00007' and prod_code != 'F00014' and business_balance >= 3000";
        Dataset<Row> prodBuyCust = SparkSqlConf.getDataByCompleteSQL(session, sql);
        prodBuyCust.createOrReplaceTempView("prodBuyInfo");

        // Every customer that ever bought any product; used later as the
        // exclusion set (anti-join) when picking negative-sample customers.
        String sqlFull = "SELECT distinct cust_no FROM t_prod_buy_info";
        Dataset<Row> prodBuyCustFull = SparkSqlConf.getDataByCompleteSQL(session, sqlFull);
        prodBuyCustFull.createOrReplaceTempView("prodBuyCustFull");

        Dataset<Row> fundData = createRealView(session, prodPath);
        fundData.createOrReplaceTempView("fundData");
        Dataset<Row> portraitDF = createRealView(session, portraitPath);
        portraitDF.createOrReplaceTempView("portrait");
    }

    /**
     * Loads a CSV wide table (with header row) from HDFS.
     *
     * @param session  active Spark session
     * @param hdfsPath HDFS path of the CSV file or directory
     * @return the loaded dataset, never {@code null}
     * @throws IllegalStateException if the path cannot be read. The original
     *         code returned {@code null} here, which only deferred the failure
     *         to an uninformative NPE at the first use of the dataset.
     */
    private Dataset<Row> createRealView(SparkSession session, String hdfsPath) {
        try {
            return session.read().format("csv").option("header", "true").load(hdfsPath);
        } catch (Exception e) {
            System.err.println("未找到有效的宽表数据，查找路径为：" + hdfsPath);
            // Fail fast with the cause attached instead of returning null.
            throw new IllegalStateException("no valid wide table data at: " + hdfsPath, e);
        }
    }

    /**
     * Builds the labeled GBDT+LR training set and writes it to HDFS as a
     * single CSV file.
     *
     * <p>Positive samples (label=1): real purchases joined with the buyer's
     * portrait features and the purchased fund's features. Negative samples
     * (label=0): customers that never bought anything, each randomly paired
     * with a product, joined with the same feature columns.</p>
     *
     * @param session  active Spark session (views from
     *                 {@link #createView(SparkSession, String, String)} must
     *                 already be registered)
     * @param fileName HDFS output path for the resulting CSV
     */
    @Override
    public void calculateEstimationResult(SparkSession session, String fileName) {
        // label=1: buyer portrait features + purchased-fund features.
        Dataset<Row> positiveSample = session.sql("select pb.*, f.* from (\n" +
                "    select 1 as label, u.*, p.prod_code from (select cust_no, prod_code from prodBuyInfo) as p inner join (\n" +
                "        select * from portrait where normal_status = '正常' and cust_type = '个人' and no_trd_days <= 1100\n" +
                "    ) as u\n" +
                "    on p.cust_no = u.client_id where u.client_id is not null\n" +
                ") as pb\n" +
                "inner join (\n" +
                "    select innercode,maincode,companycode,typecode,fundnatureid,investmenttypecode,investstylecode,fundtypecode,\n" +
                "       floattypecode,foundedsize,investadvisorcode,datacode,RiskLevel,personalcode,gendercode,nationalitycode,\n" +
                "       educationcode,experiencetime,returntypeavg,returntyperank,monrettypeavg,monrettyperank,totalaumtypeavg,\n" +
                "       totalaumrank,avgaumtypeavg,avgaumtyperank,enddate,prod_name from fundData\n" +
                ") as f\n" +
                "on f.maincode = pb.prod_code where f.maincode is not null\n");

        // Anti-join: active personal customers that appear in the portrait
        // but never bought any product — the negative-sample population.
        Dataset<Row> negativeUsers = session.sql("select u.* from prodBuyCustFull as p\n" +
                "right join (\n" +
                "    select * from portrait where normal_status = '正常' and cust_type = '个人' and no_trd_days <= 1100\n" +
                ") as u on p.cust_no = u.client_id where p.cust_no is null\n");
        negativeUsers.createOrReplaceTempView("NegUser");

        Dataset<Row> prodCodes = session.sql("select maincode from fundData");
        Dataset<Row> negUserIds = session.sql("select client_id from NegUser");

        // Inflate the product pool: exploding a literal int array duplicates
        // each product row NEGATIVE_REPLICATION_FACTOR times; the helper
        // column is dropped immediately afterwards.
        int[] replication = IntStream.range(0, NEGATIVE_REPLICATION_FACTOR).toArray();
        prodCodes = prodCodes
                .withColumn("row_num", functions.explode(functions.lit(replication)))
                .drop(functions.col("row_num"));

        // Shuffle both sides, then align them row-by-row via their shuffle
        // position to obtain random (customer, product) pairs. The pair join
        // keeps only indices present on both sides, so the number of negative
        // pairs is bounded by the smaller of the two row counts.
        JavaPairRDD<Long, Row> prodPairRDD = indexRows(prodCodes.orderBy(functions.rand()));
        JavaPairRDD<Long, Row> custPairRDD = indexRows(negUserIds.orderBy(functions.rand()));
        JavaPairRDD<Long, Tuple2<Row, Row>> joinRDD = custPairRDD.join(prodPairRDD);

        StructType schema = new StructType(new StructField[]{
                new StructField("client_id", DataTypes.StringType, false, Metadata.empty()),
                new StructField("prod_code", DataTypes.StringType, false, Metadata.empty())
        });
        // Concatenate the single-column customer row and product row into one
        // two-column row matching the schema above.
        JavaRDD<Row> pairedRows = joinRDD.map(tuple -> {
            List<Object> cells = new ArrayList<>();
            cells.addAll(JavaConverters.seqAsJavaList(tuple._2._1.toSeq()));
            cells.addAll(JavaConverters.seqAsJavaList(tuple._2._2.toSeq()));
            Seq<Object> rowSeq = JavaConverters.asScalaIteratorConverter(cells.iterator()).asScala().toSeq();
            return Row.fromSeq(rowSeq);
        });
        Dataset<Row> pairView = session.createDataFrame(pairedRows, schema);
        pairView.createOrReplaceTempView("UserProdIndex");

        // label=0: random customer/product pairs with the same feature columns
        // as the positive samples (left join keeps pairs whose product lacks
        // fund features, mirroring the original behavior).
        Dataset<Row> negativeSample = session.sql("select 0 as label, ut.*, f.* from (\n" +
                "  select u.*, t.prod_code from (select client_id, prod_code from UserProdIndex) as t inner join\n" +
                "      NegUser as u \n" +
                "  on u.client_id = t.client_id \n" +
                ") as ut\n" +
                "left join (\n" +
                "    select innercode,maincode,companycode,typecode,fundnatureid,investmenttypecode,investstylecode,fundtypecode,\n" +
                "       floattypecode,foundedsize,investadvisorcode,datacode,RiskLevel,personalcode,gendercode,nationalitycode,\n" +
                "       educationcode,experiencetime,returntypeavg,returntyperank,monrettypeavg,monrettyperank,totalaumtypeavg,\n" +
                "       totalaumrank,avgaumtypeavg,avgaumtyperank,enddate,prod_name from fundData\n" +
                ") as f\n" +
                "on f.maincode = ut.prod_code");

        // Position-based union (same semantics as the deprecated unionAll):
        // both branches emit label, portrait columns, prod_code, fund columns.
        Dataset<Row> trainingSet = positiveSample.union(negativeSample);

        // Write the training set to HDFS as a single CSV file with a header.
        trainingSet.coalesce(1).write()
                .format("csv")
                .option("header", "true")
                // BUG FIX: "mode" is not a CSV writer option and was silently
                // ignored; the save mode must be set via mode(), otherwise
                // re-runs fail because the output path already exists.
                .mode("overwrite")
                .save(fileName);
    }

    /**
     * Keys each row by its {@code zipWithIndex} position so that two
     * independently shuffled datasets can be aligned row-by-row via a
     * pair-RDD join.
     */
    private static JavaPairRDD<Long, Row> indexRows(Dataset<Row> ds) {
        return ds.toJavaRDD().zipWithIndex().mapToPair(t -> new Tuple2<>(t._2, t._1));
    }
}
