package yuekao5.machine;

import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalBinaryClassBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalMultiClassBatchOp;
import com.alibaba.alink.operator.batch.sink.CsvSinkBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.common.evaluation.BinaryClassMetrics;
import com.alibaba.alink.operator.common.evaluation.MultiClassMetrics;
import com.alibaba.alink.operator.common.evaluation.TuningBinaryClassMetric;
import com.alibaba.alink.operator.common.evaluation.TuningMultiClassMetric;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.DecisionTreeClassifier;
import com.alibaba.alink.pipeline.classification.KnnClassifier;
import com.alibaba.alink.pipeline.classification.LogisticRegression;
import com.alibaba.alink.pipeline.dataproc.vector.VectorAssembler;
import com.alibaba.alink.pipeline.feature.OneHotEncoder;
import com.alibaba.alink.pipeline.tuning.BinaryClassificationTuningEvaluator;
import com.alibaba.alink.pipeline.tuning.GridSearchCV;
import com.alibaba.alink.pipeline.tuning.MultiClassClassificationTuningEvaluator;
import com.alibaba.alink.pipeline.tuning.ParamGrid;

public class KaggleDiabetes {
    /**
     * End-to-end Alink batch pipeline on the Kaggle diabetes dataset:
     * load CSV -> one-hot encode Age -> assemble feature vector -> 80/20 split
     * -> train LR / kNN / decision-tree models -> evaluate (Accuracy, Recall)
     * -> grid-search each algorithm for its best model -> re-evaluate on the
     * test split -> save the best models and the raw predictions to disk.
     */
    public static void main(String[] args) throws Exception {
        // (1) Set parallelism to 1 and read the diabetes dataset.
        //     Label column is "Outcome" (1 = diabetic, 0 = non-diabetic);
        //     the remaining 8 columns are the features.
        BatchOperator.setParallelism(1);
        String filePath = "data/yk5/kaggle-diabetes.txt";
        // Header: Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age,Outcome
        // Sample row: 6,148,72,35,0,33.6,0.627,50,1
        // FIX: the original schema string ended with a trailing comma, which
        // declares an empty extra field and breaks Alink's schema parsing.
        String schema =
                "Pregnancies int,Glucose int,BloodPressure int,SkinThickness int,Insulin int," +
                "BMI double,DiabetesPedigreeFunction double,Age int,Outcome int";
        CsvSourceBatchOp csvSource = new CsvSourceBatchOp()
                .setFilePath(filePath)
                .setSchemaStr(schema)
                .setFieldDelimiter(",");
//        csvSource.print();
        String[] features = new String[]{"Pregnancies", "Glucose", "BloodPressure", "SkinThickness",
                "Insulin", "BMI", "DiabetesPedigreeFunction", "Age"};
        String label = "Outcome";

        // (2) One-hot encode the categorical Age feature, then assemble all
        //     feature columns into a single Vector column "vec".
        OneHotEncoder one_hot = new OneHotEncoder().setSelectedCols("Age").setOutputCols("new_Age");
        BatchOperator<?> transform = one_hot.fit(csvSource).transform(csvSource);
        VectorAssembler res = new VectorAssembler()
                .setSelectedCols("Pregnancies", "Glucose", "BloodPressure", "SkinThickness",
                        "Insulin", "BMI", "DiabetesPedigreeFunction", "new_Age")
                .setOutputCol("vec");
        BatchOperator<?> transform1 = res.transform(transform);

        // (3) Split 80/20 into train/test and print the row counts.
        //     Side output 0 of SplitBatchOp holds the remaining (test) rows.
        BatchOperator<?> spliter = new SplitBatchOp().setFraction(0.8);
        BatchOperator<?> trainData = spliter.linkFrom(transform1);
        BatchOperator<?> testData = spliter.getSideOutput(0);

        System.out.println("trainData条目数:" + trainData.count());
        System.out.println("testData条目数:" + testData.count());

        // (4) Build three classifiers (logistic regression, kNN, decision tree)
        //     on trainData with reasonable initial hyper-parameters.
        // Logistic regression — trains on the assembled vector column.
        LogisticRegression lr = new LogisticRegression()
//                .setFeatureCols("f0", "f1")
                .setVectorCol("vec")
                .setLabelCol(label)
                .setPredictionCol("pred")
                .setPredictionDetailCol("pred_detail")
                .setNumThreads(1)
                .setMaxIter(1)
                .enableLazyPrintModelInfo();

        BatchOperator<?> lr_transform = lr.fit(trainData).transform(testData);

        // k-nearest neighbours.
        // FIX: the original chained .setK(3).setK(1) — the second call silently
        // overrode the first. A single, intentional k is kept here.
        KnnClassifier knn = new KnnClassifier()
                .setVectorCol("vec")
                .setPredictionCol("pred")
                .setPredictionDetailCol("pred_detail")
                .setLabelCol(label)
                .setK(3)
                .setNumThreads(1)
                .enableLazyPrintTransformStat();

        BatchOperator<?> knn_transform = knn.fit(trainData).transform(testData);

        // Decision tree — uses the raw feature columns rather than "vec".
        DecisionTreeClassifier dtc = new DecisionTreeClassifier()
                .setPredictionDetailCol("pred_detail")
                .setPredictionCol("pred")
                .setLabelCol(label)
                .setFeatureCols(features)
                .setMaxBins(128)
                .setMaxDepth(1)
                .enableLazyPrintModelInfo();

        BatchOperator<?> dtc_transform = dtc.fit(trainData).transform(testData);

        // (5) Evaluate each model on the test split: Accuracy and (macro) Recall.
        EvalMultiClassBatchOp metrics1 = new EvalMultiClassBatchOp()
                .setLabelCol(label)
                .setPredictionDetailCol("pred_detail");

        MultiClassMetrics lr_binaryClassMetrics = metrics1.linkFrom(lr_transform).collectMetrics();
        System.out.println("准确率Accuracy:" + lr_binaryClassMetrics.getAccuracy());
        System.out.println("召回率Recall:" + lr_binaryClassMetrics.getMacroRecall());

        MultiClassMetrics knn_binaryClassMetrics = metrics1.linkFrom(knn_transform).collectMetrics();
        System.out.println("准确率Accuracy:" + knn_binaryClassMetrics.getAccuracy());
        System.out.println("召回率Recall:" + knn_binaryClassMetrics.getMacroRecall());

        MultiClassMetrics dtc_binaryClassMetrics = metrics1.linkFrom(dtc_transform).collectMetrics();
        System.out.println("准确率Accuracy:" + dtc_binaryClassMetrics.getAccuracy());
        System.out.println("召回率Recall:" + dtc_binaryClassMetrics.getMacroRecall());

        // (6) Grid-search each algorithm (>=2 hyper-parameters each) with
        //     2-fold cross-validation, tuning for Accuracy.
//        BinaryClassificationTuningEvaluator tuningEvaluator = new BinaryClassificationTuningEvaluator()
//                .setLabelCol(label)
//                .setPredictionDetailCol("pred_detail")
//                .setTuningBinaryClassMetric(TuningBinaryClassMetric.ACCURACY);

        MultiClassClassificationTuningEvaluator multiClassClassificationTuningEvaluator =
                new MultiClassClassificationTuningEvaluator()
                        .setLabelCol(label)
                        .setPredictionCol("pred")
                        .setPredictionDetailCol("pred_detail")
                        .setTuningMultiClassMetric(TuningMultiClassMetric.ACCURACY);

        // Logistic regression: tune L1 regularization and iteration count.
        ParamGrid paramGrid1 = new ParamGrid()
                .addGrid(lr, LogisticRegression.L_1, new Double[]{1.0, 0.99, 0.98})
                .addGrid(lr, LogisticRegression.MAX_ITER, new Integer[]{3, 6, 9});

        GridSearchCV cv1 = new GridSearchCV()
                .setEstimator(lr)
                .setParamGrid(paramGrid1)
                .setTuningEvaluator(multiClassClassificationTuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");

        // kNN: tune k and the thread count.
        ParamGrid paramGrid2 = new ParamGrid()
                .addGrid(knn, KnnClassifier.K, new Integer[]{1, 2, 3})
                .addGrid(knn, KnnClassifier.NUM_THREADS, new Integer[]{3, 6, 9});

        GridSearchCV cv2 = new GridSearchCV()
                .setEstimator(knn)
                .setParamGrid(paramGrid2)
                .setTuningEvaluator(multiClassClassificationTuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");

        // Decision tree: tune max bins and max depth.
        ParamGrid paramGrid3 = new ParamGrid()
                .addGrid(dtc, DecisionTreeClassifier.MAX_BINS, new Integer[]{1, 2, 3})
                .addGrid(dtc, DecisionTreeClassifier.MAX_DEPTH, new Integer[]{3, 6, 9});

        GridSearchCV cv3 = new GridSearchCV()
                .setEstimator(dtc)
                .setParamGrid(paramGrid3)
                .setTuningEvaluator(multiClassClassificationTuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");

        // (7) Predict on testData with each best model and report Accuracy.
        PipelineModel lr_bestPipelineModel = cv1.fit(trainData).getBestPipelineModel();
        BatchOperator<?> lr_transform2 = lr_bestPipelineModel.transform(testData);

        PipelineModel knn_bestPipelineModel2 = cv2.fit(trainData).getBestPipelineModel();
        BatchOperator<?> knn_transform2 = knn_bestPipelineModel2.transform(testData);

        PipelineModel dtc_bestPipelineModel2 = cv3.fit(trainData).getBestPipelineModel();
        BatchOperator<?> dtc_transform2 = dtc_bestPipelineModel2.transform(testData);

        MultiClassMetrics lr_multiClassMetrics = metrics1.linkFrom(lr_transform2).collectMetrics();
        System.out.println("lr Accuracy:" + lr_multiClassMetrics.getAccuracy());

        MultiClassMetrics knn_multiClassMetrics = metrics1.linkFrom(knn_transform2).collectMetrics();
        System.out.println("knn Accuracy:" + knn_multiClassMetrics.getAccuracy());

        MultiClassMetrics dtc_multiClassMetrics = metrics1.linkFrom(dtc_transform2).collectMetrics();
        System.out.println("dtc Accuracy:" + dtc_multiClassMetrics.getAccuracy());

        // (8) Persist the best models and the prediction results.
        lr_bestPipelineModel.save("data/yk5/one/lr_model.csv");
        knn_bestPipelineModel2.save("data/yk5/one/knn_model.csv");
        dtc_bestPipelineModel2.save("data/yk5/one/dtc_model.csv");

        lr_transform.link(new CsvSinkBatchOp().setFilePath("data/yk5/one/lr_test.txt"));
        knn_transform.link(new CsvSinkBatchOp().setFilePath("data/yk5/one/knn_test.txt"));
        dtc_transform.link(new CsvSinkBatchOp().setFilePath("data/yk5/one/dtc_test.txt"));

        // FIX: Alink sinks (and PipelineModel.save) are lazy — without an
        // explicit execute() the save/sink jobs above would never run.
        BatchOperator.execute();
    }
}
