package com.swsc.ai.tasks;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;
import scala.collection.JavaConverters;
import scala.collection.Seq;

import java.util.ArrayList;
import java.util.List;

/**
 * Demo estimation task: loads two local CSV files, pairs their rows by
 * positional index (a row-by-row "zip"), and materialises the concatenated
 * rows as a new DataFrame with a fixed 9-column string schema.
 *
 * @describe: joins two CSV-backed views row-by-row by position
 * @author: 容若
 * @created: 2023-12-04 15:53
 */
public class MyTask extends ProdEstimationTempTask {

    /**
     * Output schema of the joined view. All columns are non-nullable strings.
     * NOTE(review): assumes the two CSVs together contribute exactly these
     * 9 columns in this order — TODO confirm against the CSV headers.
     * Hoisted to a constant so it is built once, not on every createView call.
     */
    private static final StructType JOINED_SCHEMA = new StructType(new StructField[]{
            new StructField("client_id", DataTypes.StringType, false, Metadata.empty()),
            new StructField("behives", DataTypes.StringType, false, Metadata.empty()),
            new StructField("del", DataTypes.StringType, false, Metadata.empty()),
            new StructField("normal_status", DataTypes.StringType, false, Metadata.empty()),
            new StructField("cust_type", DataTypes.StringType, false, Metadata.empty()),
            new StructField("no_trd_days", DataTypes.StringType, false, Metadata.empty()),
            new StructField("cust_no", DataTypes.StringType, false, Metadata.empty()),
            new StructField("code", DataTypes.StringType, false, Metadata.empty()),
            new StructField("hight", DataTypes.StringType, false, Metadata.empty())
    });

    /**
     * Loads the two CSV files, registers them as temp views "a" and "b",
     * joins them row-by-row on positional index, prints the joined pairs,
     * and shows the resulting wide DataFrame.
     *
     * @param session      active Spark session
     * @param portraitPath unused in this demo implementation
     * @param prodPath     unused in this demo implementation
     */
    @Override
    public void createView(SparkSession session, String portraitPath, String prodPath) {
        Dataset<Row> a = createRealView(session, "file:///C:\\\\test.csv");
        a.createOrReplaceTempView("a");
        Dataset<Row> b = createRealView(session, "file:///C:\\\\test1.csv");
        b.createOrReplaceTempView("b");

        // Key each row by its position so the two datasets can be joined
        // row-for-row. A positional join only yields a complete result when
        // both inputs have the same row count — rows without a matching index
        // on the other side are dropped by the inner join.
        JavaPairRDD<Long, Row> aNewRDD = indexByPosition(a);
        JavaPairRDD<Long, Row> bNewRDD = indexByPosition(b);
        JavaPairRDD<Long, Tuple2<Row, Row>> joinRDD = aNewRDD.join(bNewRDD);
        joinRDD.collect().forEach(System.out::println);

        // Concatenate the columns of each matched row pair into one wide Row.
        JavaRDD<Row> rtRDD = joinRDD.map(tuple -> {
            List<Object> rowContent = new ArrayList<>();
            rowContent.addAll(JavaConverters.seqAsJavaList(tuple._2._1.toSeq()));
            rowContent.addAll(JavaConverters.seqAsJavaList(tuple._2._2.toSeq()));
            Seq<Object> rtSeq =
                    JavaConverters.asScalaIteratorConverter(rowContent.iterator()).asScala().toSeq();
            return Row.fromSeq(rtSeq);
        });
        Dataset<Row> dataView = session.createDataFrame(rtRDD, JOINED_SCHEMA);
        dataView.show();
    }

    /**
     * Pairs each row of the dataset with its 0-based position, keyed by the
     * index. Extracted to remove the duplicated zipWithIndex/mapToPair
     * boilerplate that previously appeared once per input dataset.
     *
     * @param df dataset whose rows are to be indexed
     * @return RDD of (rowIndex, row) pairs
     */
    private static JavaPairRDD<Long, Row> indexByPosition(Dataset<Row> df) {
        return df.toJavaRDD()
                .zipWithIndex()
                .mapToPair(tuple -> new Tuple2<>(tuple._2, tuple._1));
    }

    /**
     * Reads a headered CSV file into a DataFrame.
     *
     * @param session  active Spark session
     * @param hdfsPath path (local or HDFS URI) of the CSV file
     * @return the loaded DataFrame, never {@code null}
     * @throws IllegalStateException if the file cannot be loaded; the original
     *         cause is preserved. Previously the exception was swallowed and
     *         {@code null} returned, which surfaced later as an opaque NPE at
     *         the first use of the result.
     */
    private Dataset<Row> createRealView(SparkSession session, String hdfsPath) {
        try {
            return session.read().format("csv").option("header", "true").load(hdfsPath);
        } catch (Exception e) {
            throw new IllegalStateException("未找到有效的宽表数据，查找路径为：" + hdfsPath, e);
        }
    }

    /**
     * Intentionally a no-op in this demo task; the base-class contract is
     * satisfied without producing an estimation result.
     */
    @Override
    public void calculateEstimationResult(SparkSession session, String fileName) {
        // no-op
    }
}
