package regression;

import com.google.common.collect.ImmutableMap;
import org.apache.spark.ml.classification.*;
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import scala.collection.JavaConverters;
import scala.collection.Seq;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.apache.spark.sql.functions.count;
import static org.apache.spark.sql.functions.lit;

/**
 * Repeat-buyer prediction pipeline over the Tmall "data_format1" dataset.
 *
 * <p>Loads the user-info, user-log, train and test CSVs, engineers simple
 * count features at the user, merchant and (user, merchant) levels, trains a
 * logistic-regression model, and writes per-pair predicted probabilities to
 * {@code test_out.csv} as "user_id,merchant_id,probability" lines.
 */
public class DataLoad {
    /**
     * Loads a header-ed CSV into a DataFrame and down-samples it.
     *
     * @param spark          the active Spark session
     * @param FileName       path of the CSV file to read
     * @param sample_percent fraction of rows to keep (0.0&ndash;1.0); a fixed
     *                       seed (1234) makes the sample reproducible
     * @return the sampled DataFrame; every column is a string at this point
     *         (casting happens later in {@link #main})
     */
    public static Dataset<Row> loadcsv(SparkSession spark, String FileName, double sample_percent) {
        Dataset<Row> result = spark.read()
                .format("com.databricks.spark.csv")
                .option("header", "true")
                .load(FileName).sample(sample_percent, 1234L);
        // Sanity log; note count() triggers a full scan of the (sampled) file.
        System.out.println(result.schema() + " " + result.count());
        return result;
    }

    public static void main(String[] args) throws IOException {
        SparkSession spark = SparkSession
                .builder()
                .appName("JavaLogisticRegression")
                .getOrCreate();
        String UserInfoFileName = "./src/main/resources/data_format1/data_format1/user_info_format1.csv";
        String UserLogFileName = "./src/main/resources/data_format1/data_format1/user_log_format1.csv";
        String TrainDataFileName = "./src/main/resources/data_format1/data_format1/train_format1.csv";
        String TestDataFileName = "./src/main/resources/data_format1/data_format1/test_format1.csv";
        double sample_percent = 0.3;
        // Sample the big inputs for speed; the test set is always loaded in full
        // (sample fraction 1) so every pair gets a prediction.
        Dataset<Row> user_info = loadcsv(spark, UserInfoFileName, sample_percent).cache();
        Dataset<Row> user_log = loadcsv(spark, UserLogFileName, sample_percent).cache();
        Dataset<Row> train = loadcsv(spark, TrainDataFileName, sample_percent).cache();
        Dataset<Row> test = loadcsv(spark, TestDataFileName, 1).cache();

        test = test.dropDuplicates("user_id", "merchant_id");
        train = train.dropDuplicates("user_id", "merchant_id");
        // Tag each row with its origin so train/test can be split apart again
        // after the shared feature-engineering passes below.
        train = train.withColumn("origin", lit("train"));
        test = test.withColumn("origin", lit("test"));
        // Align the test schema with train so union() works; test's "prob"
        // column is empty, so its "label" values end up null after the cast.
        test = test.withColumnRenamed("prob", "label");
        user_log = user_log.withColumnRenamed("seller_id", "merchant_id");
        Dataset<Row> matrix = train.union(test);
        matrix = matrix.join(user_info, "user_id")
                .drop(user_info.col("user_id"));
        matrix.show();
        matrix.printSchema();

        // All log columns are read as strings; cast to int for aggregation.
        String[] user_log_features = {"user_id", "item_id", "cat_id", "merchant_id", "brand_id", "time_stamp", "action_type"};
        for (String feature : user_log_features) {
            user_log = user_log.withColumn(feature, user_log.col(feature).cast(DataTypes.IntegerType));
        }
        user_log.show();

        String[] matrix_features = {"user_id", "merchant_id", "label", "age_range", "gender"};
        for (String feature : matrix_features) {
            matrix = matrix.withColumn(feature, matrix.col(feature).cast(DataTypes.IntegerType));
        }
        // Missing demographics default to age_range=0 ("unknown") and gender=2
        // ("unknown") per the dataset's documented encodings.
        matrix = matrix.na().fill(ImmutableMap.of("age_range", 0, "gender", 2));
        matrix.show();

        // ---- User-level features (u0..u4) ----
        List<String> matchList = new ArrayList<>();
        matchList.add("user_id");
        Seq<String> seq = JavaConverters.asScalaIteratorConverter(matchList.iterator()).asScala().toSeq();

        RelationalGroupedDataset groups = user_log.groupBy("user_id");
        // u0: total log rows per user.
        Dataset<Row> temp = groups.count().withColumnRenamed("count", "u0");
        matrix = matrix.join(temp, seq, "left_outer");
        // u1..u4: non-null counts of item_id / cat_id / merchant_id / brand_id.
        // NOTE(review): count(col) counts non-null rows, not distinct values, so
        // these largely duplicate u0 unless a column has nulls — confirm intent.
        temp = groups.agg(count("item_id")).withColumnRenamed("count(item_id)", "u1");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("cat_id")).withColumnRenamed("count(cat_id)", "u2");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("merchant_id")).withColumnRenamed("count(merchant_id)", "u3");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("brand_id")).withColumnRenamed("count(brand_id)", "u4");
        matrix = matrix.join(temp, seq, "left_outer");
//        // Count actions of each type (0, 1, 2, 3).
//        temp = groups.agg(mean("action_type")).withColumnRenamed("avg(action_type)", "u5");
//        matrix = matrix.join(temp, seq, "left_outer");
//        matrix = matrix.na().fill(ImmutableMap.of("u5", 0));
        // Users with no (sampled) log rows get zeroes instead of nulls.
        matrix = matrix.na().fill(ImmutableMap.of("u0", 0, "u1", 0, "u2", 0, "u3", 0, "u4", 0));

        matrix.show();
        matrix.printSchema();

        // ---- Merchant-level features (m0..m3) ----
        matchList.clear();
        matchList.add("merchant_id");
        seq = JavaConverters.asScalaIteratorConverter(matchList.iterator()).asScala().toSeq();
        groups = user_log.groupBy("merchant_id");
        temp = groups.count().withColumnRenamed("count", "m0");
        matrix = matrix.join(temp, seq, "left_outer");
        // Non-null counts of item_id / cat_id / brand_id per merchant.
        temp = groups.agg(count("item_id")).withColumnRenamed("count(item_id)", "m1");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("cat_id")).withColumnRenamed("count(cat_id)", "m2");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("brand_id")).withColumnRenamed("count(brand_id)", "m3");
        matrix = matrix.join(temp, seq, "left_outer");
//        // Count actions of each type (0, 1, 2, 3).
//        temp = groups.agg(mean("action_type")).withColumnRenamed("avg(action_type)", "m4");
//        matrix = matrix.join(temp, seq, "left_outer");
        matrix = matrix.na().fill(ImmutableMap.of("m0", 0, "m1", 0, "m2", 0, "m3", 0));
        matrix.show();
        matrix.printSchema();

        // ---- User-merchant pair features (um0..um3) ----
        matchList.clear();
        matchList.add("user_id");
        matchList.add("merchant_id");
        seq = JavaConverters.asScalaIteratorConverter(matchList.iterator()).asScala().toSeq();
        groups = user_log.groupBy("user_id", "merchant_id");
        temp = groups.count().withColumnRenamed("count", "um0");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("item_id")).withColumnRenamed("count(item_id)", "um1");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("cat_id")).withColumnRenamed("count(cat_id)", "um2");
        matrix = matrix.join(temp, seq, "left_outer");
        temp = groups.agg(count("brand_id")).withColumnRenamed("count(brand_id)", "um3");
        matrix = matrix.join(temp, seq, "left_outer");
//        temp = groups.agg(mean("action_type")).withColumnRenamed("avg(action_type)", "um4");
//        matrix = matrix.join(temp, seq, "left_outer");
        matrix = matrix.na().fill(ImmutableMap.of("um0", 0, "um1", 0, "um2", 0, "um3", 0));
        matrix.show();
        // Split the feature matrix back into train/test using the origin tag.
        Dataset<Row> train_data = matrix.filter(matrix.col("origin").equalTo("train")).drop("origin");
        Dataset<Row> test_data = matrix.filter(matrix.col("origin").equalTo("test")).drop("origin");
        train_data.show();
        test_data.show();

        // Assemble all engineered columns (plus the raw ids) into one vector.
        String[] features = {"user_id", "merchant_id", "u0", "u1", "u2", "u3", "u4",
                "m0", "m1", "m2", "m3", "um0", "um1", "um2", "um3"};
        VectorAssembler assembler = new VectorAssembler().setInputCols(features).setOutputCol("features");
        Dataset<Row> training = assembler.transform(train_data).select("label", "features");
        Dataset<Row> testing = assembler.transform(test_data).select("label", "features");
        training.show();
        training.printSchema();
        // 60/40 train/validation split with a fixed seed for reproducibility.
        Dataset<Row>[] splits = training.randomSplit(new double[]{0.6, 0.4}, 1234L);


        LogisticRegression lr = new LogisticRegression();
        LogisticRegressionModel model = lr.fit(splits[0]);

        // Pick the decision threshold that maximizes F-measure on training data.
        BinaryLogisticRegressionTrainingSummary trainingSummary = model.binarySummary();
        Dataset<Row> fMeasure = trainingSummary.fMeasureByThreshold();
        double maxFMeasure = fMeasure.select(functions.max("F-Measure")).head().getDouble(0);
        double bestThreshold = fMeasure.where(fMeasure.col("F-Measure").equalTo(maxFMeasure))
                .select("threshold").head().getDouble(0);
        model.setThreshold(bestThreshold);

//        GBTClassifier gbt = new GBTClassifier().setMaxIter(10);
//        GBTClassificationModel model = gbt.fit(splits[0]);
//        RandomForestClassifier rf = new RandomForestClassifier();
//        RandomForestClassificationModel model = rf.fit(splits[0]);

        // Select example rows to display.
        Dataset<Row> predictions = model.transform(splits[1]);
        predictions.show();
        predictions.groupBy("prediction").count().show();

        // compute accuracy on the validation split
        MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator()
                .setLabelCol("label")
                .setPredictionCol("prediction")
                .setMetricName("accuracy");
        double accuracy = evaluator.evaluate(predictions);
        System.out.println("Test set accuracy = " + accuracy);
        predictions = model.transform(testing);
        predictions.show();
        predictions.groupBy("prediction").count().show();
        Dataset<Row> result = predictions.select("features", "probability");

        // Emit "user_id,merchant_id,probability" per row. try-with-resources
        // guarantees the writer is flushed and closed even if a write throws
        // (the original leaked the writer on any mid-loop exception).
        try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter("test_out.csv"))) {
            List<Row> result_list = result.collectAsList();
            for (Row row : result_list) {
                // features prints as "[uid,mid,...]" and probability as "[p0,p1]";
                // strip the leading '[' and split on ',' to recover the fields.
                // NOTE(review): prob_list[0] is the class-0 probability; confirm
                // that is the intended column (class-1 would be prob_list[1]).
                String[] line_list = row.get(0).toString().substring(1).split(",");
                String[] prob_list = row.get(1).toString().substring(1).split(",");
                bufferedWriter.write(line_list[0] + "," + line_list[1] + "," + prob_list[0] + "\n");
            }
        }

        spark.stop();
    }
}
