package ccnl.demo.algo;

import ccnl.demo.JrddTools;
import org.apache.commons.math3.analysis.function.Logistic;
import org.apache.spark.Accumulator;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.ml.param.ParamMap;
import org.apache.spark.mllib.classification.LogisticRegressionModel;
//import org.apache.spark.ml.classification.LogisticRegressionModel;
import org.apache.spark.ml.feature.OneHotEncoder;
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS;
import org.apache.spark.mllib.classification.SVMModel;
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics;
import org.apache.spark.mllib.evaluation.MulticlassMetrics;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.*;
import org.codehaus.janino.Java;
import scala.Tuple2;

import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Logistic-regression experiments on the old Spark 1.x mllib API: trains
 * {@link LogisticRegressionWithLBFGS} on parquet-backed datasets and reports
 * AUC / log-loss metrics under feature-ablation and sampling-rate setups.
 *
 * Created by wong on 16/3/30.
 */
public class LogisticReg implements Serializable {
    JavaSparkContext javaSparkContext;
    SQLContext sqlContext;

    /**
     * @param jsc  Spark context; used to create accumulators in {@link #calLogloss}
     * @param sqlc SQL context used to read parquet datasets
     */
    public LogisticReg(JavaSparkContext jsc, SQLContext sqlc){
        this.javaSparkContext = jsc;
        this.sqlContext = sqlc;
    }

    /**
     * Feature-ablation experiment: repeatedly assembles the remaining feature
     * columns into a single "features" vector, trains/evaluates once, then
     * drops the first remaining column and repeats until no columns are left.
     *
     * @param datasetPath parquet dataset containing the raw feature columns
     */
    public void runFieldListLab(String datasetPath) {
        DataFrame df = sqlContext.read().parquet(datasetPath);
        String[] strlist = {"tagvec", "C38ohe", "C37ohe", "C13ohe", "C8ohe",
        "pohe", "C25", "C24", "C22ohe", "C20ohe"};

        List<String> fieldList = new ArrayList<>(Arrays.asList(strlist));
        while (!fieldList.isEmpty()) {
            System.out.println("start df after remove-----");
            df.printSchema();
            DataFrame dfnew = assembleFields(df, fieldList.toArray(new String[fieldList.size()]));
            runJob(dfnew.toJavaRDD());
            System.out.println("end df after remove-----");
            // Ablate one feature column per iteration, then rerun.
            df = df.drop(fieldList.get(0));
            fieldList.remove(0);
        }
    }

    /**
     * Merges the listed columns into a single "features" vector column and
     * drops the originals so only label + features remain downstream.
     *
     * @param df   input frame containing every column in {@code list}
     * @param list column names to assemble, in order
     * @return frame with a "features" column replacing the inputs
     */
    private DataFrame assembleFields(DataFrame df, String[] list) {
        VectorAssembler assembler = new VectorAssembler().setInputCols(list).setOutputCol("features");
        DataFrame output = assembler.transform(df);
        for (String s : list)
            output = output.drop(s);
        return output;
    }

    /**
     * Splits the dataset by date (via {@link JrddTools#getTrain}/{@code getTest}),
     * trains once and prints the metrics.
     *
     * @param jrdd rows convertible to {@link LabeledPoint} by JrddTools
     */
    public void runJob(JavaRDD<Row> jrdd) {
        JavaRDD<Row> jrdd_train = JrddTools.getTrain(jrdd);
        JavaRDD<Row> jrdd_test = JrddTools.getTest(jrdd);

        JavaRDD<LabeledPoint> jrdd_lp_train = JrddTools.convertRow2Lp(jrdd_train);
        JavaRDD<LabeledPoint> jrdd_lp_test = JrddTools.convertRow2Lp(jrdd_test);

        JavaRDD<Tuple2<Object, Object>> predictionAndLabels = runGiven(jrdd_lp_train, jrdd_lp_test);
        printMetricsLog("1", predictionAndLabels, jrdd_lp_train, jrdd_lp_test);
    }

    /**
     * Trains LBFGS logistic regression on {@code train} and scores {@code test}.
     * Also best-effort dumps the learned coefficients (one per line) to
     * {@code <cwd>/modelCoefficients.txt}.
     *
     * @param train training points (cached here before the LBFGS iterations)
     * @param test  points to score
     * @return (rawProbability, label) pairs for each test point
     */
    public JavaRDD<Tuple2<Object, Object>> runGiven(JavaRDD<LabeledPoint> train, JavaRDD<LabeledPoint> test) {
        train.cache();
        final LogisticRegressionModel model = new LogisticRegressionWithLBFGS()
                .run(train.rdd());
        // Clear the default 0.5 threshold so predict() returns raw probabilities,
        // which calLogloss and BinaryClassificationMetrics both need.
        model.clearThreshold();
        JavaRDD<Tuple2<Object, Object>> predictionAndLabels = test.map(v1 -> {
            Double prediction = model.predict(v1.features());
            return new Tuple2<>(prediction, v1.label());
        });

        double[] vec = model.weights().toArray();
        File file = new File(System.getProperty("user.dir") + "/modelCoefficients.txt");
        // try-with-resources: the previous explicit close() leaked the writer
        // whenever write/flush threw. FileWriter also creates the file itself,
        // so no separate createNewFile() call is needed.
        try (BufferedWriter bw = new BufferedWriter(new FileWriter(file, false))) {
            for (double w : vec) {
                bw.write(Double.toString(w) + "\n");
            }
        } catch (Exception e) {
            // The coefficient dump is diagnostic only: keep the job alive but
            // report the cause instead of swallowing it silently.
            System.out.println("error when saving model coefficients: " + e);
        }

        return predictionAndLabels;
    }

    /**
     * Prints class balance, AUC and log-loss for one (prediction, label) set.
     *
     * @param jobID  label used to tag this report in the console output
     * @param tuple2 (rawProbability, label) pairs, e.g. from {@link #runGiven}
     * @param train  training set, used only for pos/neg counts
     * @param test   test set, used only for pos/neg counts
     */
    public void printMetricsLog(String jobID, JavaRDD<Tuple2<Object, Object>> tuple2, JavaRDD<LabeledPoint>
            train, JavaRDD<LabeledPoint> test) {
        long trainsetpos = JrddTools.getPosNums(train);
        long trainsetneg = JrddTools.getNegNums(train);
        long testsetpos = JrddTools.getPosNums(test);
        long testsetneg = JrddTools.getNegNums(test);
        double logloss = calLogloss(tuple2);
        System.out.println("------" + jobID + "-------");
        System.out.println("trainSet pos: " + trainsetpos +
                " trainSet neg: " + trainsetneg);
        System.out.println("testSet pos: " + testsetpos +
                " testSet neg: " + testsetneg);
        // NOTE(review): prints Infinity/NaN when a test class count is zero;
        // acceptable for a console report.
        System.out.println("trainPos/testPos: " + (double) trainsetpos / testsetpos
         + "  trainNeg/testNeg: " + (double) trainsetneg / testsetneg);

        System.out.println("trainSet TOTAL: " + train.count() + "  testSet TOTAL: " + test.count());

        BinaryClassificationMetrics metrics = new BinaryClassificationMetrics(tuple2.rdd());
        double auROC = metrics.areaUnderROC();
        System.out.println("Area Under ROC = " + auROC);
        System.out.println("LogLoss = " + logloss);
        System.out.println("---------------");
    }

    /**
     * Computes the (clipped) binary log-loss over (probability, label) pairs
     * using driver-side accumulators; foreach is an action, so each pair is
     * counted exactly once.
     *
     * @param tuple2 (rawProbability, label) pairs
     * @return mean negative log-likelihood, or 0.0 for an empty input
     */
    private double calLogloss(JavaRDD<Tuple2<Object, Object>> tuple2) {
        long N = tuple2.count();
        Accumulator<Double> accum = javaSparkContext.accumulator(0.0);
        Accumulator<Integer> checkP = javaSparkContext.accumulator(0);
        Accumulator<Integer> checkN = javaSparkContext.accumulator(0);

        tuple2.foreach( t -> {
            // Clip probabilities away from {0,1} so log() stays finite.
            double eps = 1e-15;
            double clipped = Math.max(eps, Math.min(1 - eps, (double) t._1()));

            if ((double) t._2() == 0.0) {
                accum.add(Math.log(1.0 - clipped));
                checkN.add(1);
            }
            else {
                accum.add(Math.log(clipped));
                checkP.add(1);
            }
        });
        System.out.println("N = " + N + " accum = " + accum.value());
        System.out.println("tuple2 positive nums = " + checkP.value() + " tuple2 negative nums = " + checkN.value());
        // Guard the empty-RDD case: -0.0 / 0 would otherwise yield NaN.
        if (N == 0) {
            return 0.0;
        }
        return - accum.value() / N;
    }

    /**
     * Random 80/20 split of a (label, features) parquet dataset.
     * NOTE(review): the test split is computed but never used and nothing is
     * trained here — this looks like unfinished work; verify before relying on it.
     *
     * @param filePath parquet file whose rows are (double label, Vector features)
     */
    public void sampled(String filePath) {
        DataFrame df = sqlContext.read().parquet(filePath);
        JavaRDD<Row> jrdd_row = df.toJavaRDD();
        JavaRDD<LabeledPoint> jrdd_lp = jrdd_row.map(v1 -> {
            LabeledPoint lp=new LabeledPoint(v1.getDouble(0), (Vector)v1.get(1));
            return lp;
        });

        JavaRDD<LabeledPoint> training = jrdd_lp.sample(false, 0.8);
        training.cache();
        JavaRDD<LabeledPoint> test = jrdd_lp.subtract(training);
    }

    /**
     * Down-sampling experiment: for sampling ratios 1:1 through 1:20, trains
     * on a sampled train split against a fixed test split and prints metrics.
     *
     * @param datasetPath parquet dataset split by JrddTools into train/test
     */
    public void labWithSample(String datasetPath) {
        DataFrame df = sqlContext.read().parquet(datasetPath);
        JavaRDD<Row> jrdd = df.toJavaRDD();
        JavaRDD<Row> jrdd_train = JrddTools.getTrain(jrdd);
        JavaRDD<Row> jrdd_test = JrddTools.getTest(jrdd);

        JavaRDD<LabeledPoint> jrdd_lp_test = JrddTools.convertRow2Lp(jrdd_test);
        System.out.println("lab # no distinct trainset, check for sample rate: ");
        for (int i = 1; i <= 20; i++) {
            System.out.println("sample rate = 1:" + i);
            JavaRDD<Row> jrdd_train_sampled = JrddTools.getSampledTrain(jrdd_train, (double) i);
            JavaRDD<LabeledPoint> jrdd_lp_train_sampled = JrddTools.convertRow2Lp(jrdd_train_sampled);
            JavaRDD<Tuple2<Object, Object>> pl = runGiven(jrdd_lp_train_sampled, jrdd_lp_test);
            long sampledPos = JrddTools.getPosNums(jrdd_lp_train_sampled);
            long sampledNeg = JrddTools.getNegNums(jrdd_lp_train_sampled);
            System.out.println("sampled pos = " + sampledPos);
            System.out.println("sampled neg = " + sampledNeg);
            printMetricsLog("" + i, pl, jrdd_lp_train_sampled, jrdd_lp_test);
        }
    }
}
