package com.bw;

import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalBinaryClassBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalMultiClassBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.batch.sql.FilterBatchOp;
import com.alibaba.alink.operator.common.evaluation.BinaryClassMetrics;
import com.alibaba.alink.operator.common.evaluation.MultiClassMetrics;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.KnnClassificationModel;
import com.alibaba.alink.pipeline.classification.KnnClassifier;
import com.alibaba.alink.pipeline.classification.LogisticRegression;
import com.alibaba.alink.pipeline.classification.LogisticRegressionModel;
import com.alibaba.alink.pipeline.dataproc.vector.VectorAssembler;
import com.alibaba.alink.pipeline.feature.OneHotEncoder;
import com.alibaba.alink.pipeline.sql.Select;
import com.alibaba.alink.pipeline.tuning.*;

public class Test1 {
    /**
     * End-to-end binary-classification pipeline built with Alink:
     * (1) load CSV train/test data, (2) feature engineering (one-hot encoding
     * plus vector assembly), (3) 8:2 train/validate split, (4) train logistic
     * regression and KNN models, (5) evaluate them on the validation split,
     * (6) grid-search tune both and save the best pipelines to disk,
     * (7) predict the test set with whichever tuned model scores higher
     * on validation accuracy.
     */
    public static void main(String[] args) throws Exception {

        // (1) Build the Alink environment with batch parallelism 1, load the raw
        // train and test data sets, and print their row counts.
        BatchOperator.setParallelism(1);
        String trainFile = "datafile/train.csv";
        String testFile = "datafile/test.csv";
        String trainSchema = "id int,label double,pc int,name string,sex string,age double,ss int,par int,ticket string,fare double,cabin string,embarked string";
        String testSchema = "id int,pc int,name string,sex string,age double,ss int,par int,ticket string,fare double,cabin string,embarked string";
        CsvSourceBatchOp trainData1 = new CsvSourceBatchOp()
                .setFilePath(trainFile)
                .setSchemaStr(trainSchema)
                .setIgnoreFirstLine(true)
                .setFieldDelimiter(",")
                .setLenient(true); // lenient parsing of malformed rows

        CsvSourceBatchOp testData1 = new CsvSourceBatchOp()
                .setFilePath(testFile)
                .setSchemaStr(testSchema)
                .setIgnoreFirstLine(true)
                .setFieldDelimiter(",")
                .setLenient(true); // lenient parsing of malformed rows

        long count1 = trainData1.count();
        long count2 = testData1.count();
        System.out.println("count1 = " + count1);
        System.out.println("count2 = " + count2);

        // (2) Feature engineering: keep five feature columns plus the label,
        // one-hot encode two categorical features, and assemble everything
        // into a single Vector column "vec".

        // Project down to the feature columns and the label.
        Select select = new Select().setClause("pc,age,ss,par,fare,label");
        BatchOperator<?> selectTranData = select.transform(trainData1);

        // One-hot encode "pc" and "age".
        // NOTE(review): "age" is a continuous double; one-hot encoding it
        // creates one category per distinct value — confirm this is intended.
        OneHotEncoder one_hot = new OneHotEncoder()
                .setSelectedCols("pc", "age").setOutputCols("one_hot");

        BatchOperator<?> one_hotTranData = one_hot.fit(selectTranData).transform(selectTranData);
        one_hotTranData.print();

        // Assemble the encoded vector and the remaining numeric features.
        VectorAssembler res = new VectorAssembler()
                .setSelectedCols("one_hot", "ss", "par", "fare")
                .setOutputCol("vec");
        // Fix: the original chained .print() here AND called print() again on
        // the next line, triggering two identical batch jobs; print only once.
        BatchOperator<?> vecData = res.transform(one_hotTranData);
        vecData.print();

        // (3) Split the engineered data 8:2 into train/validate parts and
        // print the row counts. The split's main output is the 80% part;
        // side output 0 holds the remaining 20%.
        BatchOperator<?> new_trainData = new SplitBatchOp().setFraction(0.8).linkFrom(vecData);
        BatchOperator<?> validateData = new_trainData.getSideOutput(0);
        System.out.println("new_trainData:" + new_trainData.count());
        System.out.println("validateData:" + validateData.count());

        // (4) Train two classifiers on the 80% split and score the 20% split.

        // Logistic regression with L1 regularization.
        LogisticRegression lr = new LogisticRegression()
                .setVectorCol("vec")
                .setLabelCol("label")
                .setMaxIter(50)
                .setL1(0.1)
                .setPredictionDetailCol("prediction_detail")
                .setPredictionCol("pred");
        LogisticRegressionModel lr_model = lr.fit(new_trainData);
        BatchOperator<?> lr_result = lr_model.transform(validateData);

        // K-nearest neighbors with K = 3.
        KnnClassifier knn = new KnnClassifier()
                .setVectorCol("vec")
                .setPredictionCol("pred")
                .setLabelCol("label")
                .setPredictionDetailCol("prediction_detail")
                .setK(3);
        KnnClassificationModel knn_model = knn.fit(new_trainData);
        BatchOperator<?> knn_result = knn_model.transform(validateData);

        // (5) Evaluate both models on the validation split: AUC, PRC area and
        // accuracy for logistic regression (binary metrics); accuracy and
        // precision/recall/sensitivity for KNN (multi-class metrics).

        // Logistic regression: binary-classification metrics.
        BinaryClassMetrics lr_metrics = new EvalBinaryClassBatchOp()
                .setLabelCol("label")
                .setPredictionDetailCol("prediction_detail")
                .linkFrom(lr_result).collectMetrics();

        System.out.println("AUC:" + lr_metrics.getAuc());
        System.out.println("PRC:" + lr_metrics.getPrc());
        System.out.println("Accuracy:" + lr_metrics.getAccuracy());

        // KNN: multi-class metrics.
        MultiClassMetrics metrics1 = new EvalMultiClassBatchOp().setLabelCol("label").setPredictionDetailCol(
                "prediction_detail").linkFrom(knn_result).collectMetrics();
        System.out.println("Prefix0 accuracy:" + metrics1.getAccuracy());
        System.out.println("Macro Precision:" + metrics1.getMacroPrecision());
        System.out.println("Micro Recall:" + metrics1.getMicroRecall());
        System.out.println("Weighted Sensitivity:" + metrics1.getWeightedSensitivity());

        // (6) Hyper-parameter tuning: grid-search each classifier over two
        // hyper-parameters with 2-fold CV, keep the best pipeline of each,
        // and save both models to the local file system.

        // KNN grid: K and thread count, tuned on accuracy.
        ParamGrid knn_paramGrid = new ParamGrid()
                .addGrid(knn, KnnClassifier.K, new Integer[] {3, 7, 12})
                .addGrid(knn, KnnClassifier.NUM_THREADS, new Integer[] {1, 2, 3});
        MultiClassClassificationTuningEvaluator knn_tuningEvaluator = new MultiClassClassificationTuningEvaluator()
                .setLabelCol("label")
                .setPredictionDetailCol("prediction_detail")
                .setTuningMultiClassMetric("ACCURACY");

        GridSearchCV knn_cv = new GridSearchCV()
                .setEstimator(knn)
                .setParamGrid(knn_paramGrid)
                .setTuningEvaluator(knn_tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");
        GridSearchCVModel knn_model1 = knn_cv.fit(new_trainData);
        PipelineModel knn_best_model = knn_model1.getBestPipelineModel();

        // Logistic-regression grid: iteration budget and epsilon, tuned on AUC.
        ParamGrid paramGrid = new ParamGrid()
                .addGrid(lr, LogisticRegression.MAX_ITER, new Integer[] {20, 80, 200})
                .addGrid(lr, LogisticRegression.EPSILON, new Double[] {0.3, 2.0, 5.0});

        BinaryClassificationTuningEvaluator tuningEvaluator = new BinaryClassificationTuningEvaluator()
                .setLabelCol("label")
                .setPredictionDetailCol("prediction_detail")
                .setTuningBinaryClassMetric("AUC");
        GridSearchCV cv = new GridSearchCV()
                .setEstimator(lr)
                .setParamGrid(paramGrid)
                .setTuningEvaluator(tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");
        GridSearchCVModel lr_model2 = cv.fit(new_trainData);
        PipelineModel lr_best_model = lr_model2.getBestPipelineModel();

        // Persist the best pipelines (second argument: overwrite if exists).
        knn_best_model.save("datafile/yk3_knn_model", true);
        lr_best_model.save("datafile/yk3_lr_model", true);

        // (7) Apply the same feature engineering to the test set (which has no
        // label column), then predict with whichever tuned model has the
        // higher validation accuracy.

        // Project to the feature columns.
        Select test_select = new Select().setClause("pc,age,ss,par,fare");
        BatchOperator<?> test_selectTranData = test_select.transform(testData1);

        // One-hot encode the same two columns as in training.
        OneHotEncoder test_one_hot = new OneHotEncoder()
                .setSelectedCols("pc", "age").setOutputCols("one_hot");
        BatchOperator<?> test_one_hotTranData = test_one_hot.fit(test_selectTranData).transform(test_selectTranData);

        // Drop rows with nulls in the numeric features before vector assembly.
        BatchOperator<?> op = new FilterBatchOp().setClause(" ss is not null and par is not null and fare is not null");
        test_one_hotTranData = test_one_hotTranData.link(op);

        // Assemble the test-set feature vector.
        VectorAssembler test_res = new VectorAssembler()
                .setSelectedCols("one_hot", "ss", "par", "fare")
                .setOutputCol("vec");
        BatchOperator<?> test_vecData = test_res.transform(test_one_hotTranData);

        // Score both tuned models on the validation split and compare accuracy.
        BatchOperator<?> lr_best_result = lr_best_model.transform(validateData);
        BatchOperator<?> knn_best_result = knn_best_model.transform(validateData);
        MultiClassMetrics metrics = new EvalMultiClassBatchOp().setLabelCol("label").setPredictionDetailCol(
                "prediction_detail").linkFrom(lr_best_result).collectMetrics();

        MultiClassMetrics metrics2 = new EvalMultiClassBatchOp().setLabelCol("label").setPredictionDetailCol(
                "prediction_detail").linkFrom(knn_best_result).collectMetrics();

        // Predict the test set with the better of the two tuned models.
        if (metrics.getAccuracy() > metrics2.getAccuracy()) {
            lr_best_model.transform(test_vecData).print();
        } else {
            knn_best_model.transform(test_vecData).print();
        }
    }
}
