package com.yusc.business;

import com.google.gson.Gson;
import com.yusc.pojo.ALlBasicInfo;
import com.yusc.pojo.Customer;
import com.yusc.pojo.Standardize;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.ml.clustering.KMeans;
import org.apache.spark.ml.clustering.KMeansModel;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.sql.*;
import org.apache.spark.ml.linalg.Vector;

import org.apache.spark.ml.feature.StandardScalerModel;
import org.apache.spark.ml.feature.StandardScaler;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;


import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;

/**
 * Step 6: build a model for customer-value analysis (LRFMC + K-Means).
 *
 * <ul>
 *   <li>6.1 Clean: fares may be missing; keep records whose fare is present, whose
 *       discount/kilometre data is usable, and drop customers older than 100.</li>
 *   <li>6.2 Select the L, R, F, M, C attributes and convert them to a DataFrame.</li>
 *   <li>6.3 Standardize each attribute (zero mean, unit standard deviation).</li>
 *   <li>6.4 Cluster the customers with K-Means (k = 5).</li>
 *   <li>6.5 Report the size and center of each cluster.</li>
 * </ul>
 */
@Slf4j
public class Take07 {
    // Gson is thread-safe; a single immutable instance parses every raw JSON record.
    private static final Gson gson = new Gson();

    public static void main(String[] args) {
        // Local Spark session; source data is the raw air-travel JSON on HDFS.
        SparkSession ss = SparkSession.builder()
                .appName("建模数据挖掘")
                .master("local")
                .getOrCreate();

        JavaSparkContext jsc = new JavaSparkContext(ss.sparkContext());
        JavaRDD<String> rdd = jsc.textFile("hdfs://master:9000/air_data/*");

        // 6.1 + 6.2: parse, clean, and project each record onto the LRFMC attributes.
        JavaRDD<Customer> map = rdd.map(x -> gson.fromJson(x, ALlBasicInfo.class))
                .filter(x -> {
                    try {
                        // At least one yearly fare must be present.
                        boolean fares = x.getSumYr1() != null || x.getSumYr2() != null;
                        // FIX: was a non-short-circuiting '&', which always unboxed
                        // getSegKmSum() and relied on the catch block to drop records
                        // with a null discount; '&&' short-circuits instead.
                        boolean discountAndKm = x.getAvgDiscount() != null && x.getSegKmSum() > 0;
                        // NOTE(review): the 6.1 spec reads "fare != 0 OR (discount != 0
                        // AND km > 0)", but this filter AND-combines null-checks —
                        // confirm which rule is intended before tightening it.
                        boolean age = x.getAge() < 100;
                        boolean ffpDate = x.getFfpDate() != null;
                        return fares && discountAndKm && age && ffpDate;
                    } catch (Exception e) {
                        // Records with missing/malformed fields are silently dropped.
                        return false;
                    }
                })
                .map(x -> {
                    // L: months from enrollment to the end of the observation window.
                    Integer l = Math.toIntExact(ChronoUnit.MONTHS.between(x.getFfpDate(), x.getLoadTime()));
                    // R: months since the customer's last flight.
                    Double r = x.getLstToEnd();
                    // F: number of flights inside the window.
                    Integer f = x.getFlightCount();
                    // M: total flight kilometres inside the window.
                    Long m = x.getSegKmSum();
                    // C: average discount factor inside the window.
                    Double c = x.getAvgDiscount();
                    return new Customer(l, r, f, m, c);
                });

        // Bean-based DataFrame: Spark orders bean columns alphabetically (c, f, l, m, r).
        Dataset<Row> df = ss.createDataFrame(map, Customer.class);

        // 6.3: pack all attribute columns into a single vector column, then standardize.
        VectorAssembler assembler = new VectorAssembler()
                .setInputCols(df.columns())
                .setOutputCol("features");
        Dataset<Row> dfWithFeatures = assembler.transform(df);

        StandardScaler scaler = new StandardScaler()
                .setInputCol("features")
                .setOutputCol("scaledFeatures")
                .setWithStd(true)   // divide by the column standard deviation
                .setWithMean(true); // center each column on its mean

        StandardScalerModel scalerModel = scaler.fit(dfWithFeatures);
        Dataset<Row> scaledData = scalerModel.transform(dfWithFeatures);

        // Pretty-print the standardized values as a table.
        // NOTE(review): parsing Vector.toString() assumes a dense "[a,b,c,d,e]"
        // layout; a sparse vector would break this split — confirm the scaler
        // always yields dense output here.
        Dataset<Row> standardized = scaledData.select("scaledFeatures");
        JavaRDD<Standardize> standardizeJavaRDD = standardized.toJavaRDD()
                .map(x -> {
                    String[] split = x.get(0).toString().replace("[", "").replace("]", "")
                            .split(",");
                    // Index order mirrors the alphabetical column order noted above.
                    return Standardize.builder()
                            .c(Double.valueOf(split[0]))
                            .f(Double.valueOf(split[1]))
                            .l(Double.valueOf(split[2]))
                            .m(Double.valueOf(split[3]))
                            .r(Double.valueOf(split[4]))
                            .build();
                });
        Dataset<Row> dataFrame = ss.createDataFrame(standardizeJavaRDD, Standardize.class);
        dataFrame.show(10, false);

        // 6.4: K-Means on the standardized features; fixed seed for reproducibility.
        KMeans kmeans = new KMeans()
                .setK(5)                           // number of clusters
                .setFeaturesCol("scaledFeatures")  // use the standardized column
                .setSeed(123);
        KMeansModel model = kmeans.fit(scaledData);

        // 6.5: label every record and count the members of each cluster.
        Dataset<Row> transformedData = model.transform(scaledData);
        Dataset<Row> prediction = transformedData.groupBy("prediction").count();

        // Cluster label i corresponds to clusterCenters()[i].
        Vector[] centers = model.clusterCenters();
        for (int i = 0; i < centers.length; i++) {
            System.out.println("Cluster center " + i + ": " + centers[i]);
        }

        // Build a (prediction, center) DataFrame so the counts can be joined
        // with their centers into one report.
        List<Row> rows = new ArrayList<>(centers.length);
        for (int i = 0; i < centers.length; i++) {
            rows.add(RowFactory.create(i, centers[i]));
        }

        StructType schema = new StructType()
                .add("prediction", DataTypes.IntegerType)
                .add("center", new VectorUDT());
        Dataset<Row> clusterCentersData = ss.createDataFrame(rows, schema);

        Dataset<Row> predictionWithCenter = prediction.join(clusterCentersData, "prediction");
        predictionWithCenter.show(false);

        // FIX: stop the whole session (which also stops the underlying
        // SparkContext), not just the JavaSparkContext wrapper.
        ss.stop();
    }
}
