package yuekao6.machine;

import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalMultiClassBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.batch.utils.UDFBatchOp;
import com.alibaba.alink.operator.common.evaluation.MultiClassMetrics;
import com.alibaba.alink.operator.common.evaluation.TuningMultiClassMetric;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.LogisticRegression;
import com.alibaba.alink.pipeline.classification.LogisticRegressionModel;
import com.alibaba.alink.pipeline.classification.RandomForestClassificationModel;
import com.alibaba.alink.pipeline.classification.RandomForestClassifier;
import com.alibaba.alink.pipeline.tuning.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.types.Row;

import java.util.List;

public class LosaMachine {

    /**
     * End-to-end Alink batch-classification exercise on the lpsa data set:
     * load the CSV, binarize the continuous label, split into
     * train/validation/test, train a logistic-regression and a random-forest
     * model, evaluate both on the validation set, grid-search tune both, and
     * finally batch-predict the full data set with whichever tuned model
     * scores higher on the test set.
     */
    public static void main(String[] args) throws Exception {
        // 1. Load the data and derive a discrete {0,1} label from the continuous one.
        //    Alink batch operators manage their own execution environment, so the
        //    unused StreamExecutionEnvironment from the original has been removed.
        String filePath = "data/yk6/lpsa.data";
        String schema =
                "label double,f0 double,f1 double,f2 double,f3 double,f4 double,f5 double,f6 double,f7 double";
        CsvSourceBatchOp csvSource = new CsvSourceBatchOp()
                .setFilePath(filePath)
                .setSchemaStr(schema)
                .setFieldDelimiter(",");

        String[] featureCols = new String[]{"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"};

        // Map label -> new_label: 1 if label > 2, else 0 (see SubstringFunction).
        UDFBatchOp udfBatchOp = new UDFBatchOp()
                .setFunc(new SubstringFunction())
                .setSelectedCols("label")
                .setOutputCol("new_label")
                .linkFrom(csvSource);

        // 2. Split: 80% training; the remaining 20% is halved into test and validation.
        SplitBatchOp trainSplit = new SplitBatchOp().setFraction(0.8);
        BatchOperator<?> trainData = trainSplit.linkFrom(udfBatchOp);
        BatchOperator<?> holdout = trainSplit.getSideOutput(0);

        SplitBatchOp holdoutSplit = new SplitBatchOp().setFraction(0.5);
        BatchOperator<?> testData = holdoutSplit.linkFrom(holdout);
        BatchOperator<?> valiData = holdoutSplit.getSideOutput(0);

        // 3. Train two classifiers on the training set.
        // Logistic regression.
        LogisticRegression lr = new LogisticRegression()
                .setFeatureCols(featureCols)
                .setLabelCol("new_label")
                .setPredictionCol("pred")
                .setPredictionDetailCol("pred_detail")
                .setMaxIter(5)
                .setL1(0.0)
                .enableLazyPrintModelInfo();
        LogisticRegressionModel lrModel = lr.fit(trainData);
        BatchOperator<?> lrValiPred = lrModel.transform(valiData);

        // Random forest.
        RandomForestClassifier rf = new RandomForestClassifier()
                .setPredictionDetailCol("pred_detail")
                .setPredictionCol("pred")
                .setLabelCol("new_label")
                .setFeatureCols(featureCols)
                .setMaxBins(128)
                .setMinInfoGain(0.0)
                .enableLazyPrintModelInfo();
        RandomForestClassificationModel rfModel = rf.fit(trainData);
        BatchOperator<?> rfValiPred = rfModel.transform(valiData);

        // 4. Evaluate each model on the validation set (accuracy + macro recall).
        //    Use a fresh EvalMultiClassBatchOp per evaluation: the original reused
        //    one instance, and a second linkFrom silently rebinds its input.
        MultiClassMetrics lrMetrics = newEvaluator().linkFrom(lrValiPred).collectMetrics();
        System.out.println("lr的准确率:" + lrMetrics.getAccuracy());
        System.out.println("lr的召回率:" + lrMetrics.getMacroRecall());

        MultiClassMetrics rfMetrics = newEvaluator().linkFrom(rfValiPred).collectMetrics();
        System.out.println("fc的准确率:" + rfMetrics.getAccuracy());
        System.out.println("fc的召回率:" + rfMetrics.getMacroRecall());

        // 5. Grid-search tune both algorithms (2-fold CV, accuracy as the metric).
        MultiClassClassificationTuningEvaluator tuningEvaluator =
                new MultiClassClassificationTuningEvaluator()
                        .setPredictionDetailCol("pred_detail")
                        .setPredictionCol("pred")
                        .setLabelCol("new_label")
                        .setTuningMultiClassMetric(TuningMultiClassMetric.ACCURACY);

        // Logistic-regression search space: L1 strength and iteration count.
        ParamGrid lrGrid = new ParamGrid()
                .addGrid(lr, LogisticRegression.L_1, new Double[]{1.0, 0.99, 0.98})
                .addGrid(lr, LogisticRegression.MAX_ITER, new Integer[]{3, 6, 9});
        GridSearchCV lrSearch = new GridSearchCV()
                .setEstimator(lr)
                .setParamGrid(lrGrid)
                .setTuningEvaluator(tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");
        PipelineModel bestLr = lrSearch.fit(trainData).getBestPipelineModel();
        BatchOperator<?> lrTestPred = bestLr.transform(testData);

        // Random-forest search space. NOTE(review): NUM_THREADS only affects
        // training parallelism, not model quality; kept to match the original grid.
        ParamGrid rfGrid = new ParamGrid()
                .addGrid(rf, RandomForestClassifier.NUM_THREADS, new Integer[]{1, 2, 3})
                .addGrid(rf, RandomForestClassifier.MAX_BINS, new Integer[]{3, 6, 9});
        GridSearchCV rfSearch = new GridSearchCV()
                .setEstimator(rf)
                .setParamGrid(rfGrid)
                .setTuningEvaluator(tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");
        PipelineModel bestRf = rfSearch.fit(trainData).getBestPipelineModel();
        BatchOperator<?> rfTestPred = bestRf.transform(testData);

        // 6. Compare the tuned models on the test set; batch-predict with the winner.
        MultiClassMetrics bestLrMetrics = newEvaluator().linkFrom(lrTestPred).collectMetrics();
        System.out.println("逻辑回归准确率:" + bestLrMetrics.getAccuracy());
        System.out.println("逻辑回归召回率:" + bestLrMetrics.getMacroRecall());

        // The original printed "决策树" (decision tree) here even though the model
        // is a random forest; the user-facing label is corrected accordingly.
        MultiClassMetrics bestRfMetrics = newEvaluator().linkFrom(rfTestPred).collectMetrics();
        System.out.println("随机森林准确率:" + bestRfMetrics.getAccuracy());
        System.out.println("随机森林召回率:" + bestRfMetrics.getMacroRecall());

        // Strict comparison as in the original: ties go to the random forest.
        if (bestLrMetrics.getAccuracy() > bestRfMetrics.getAccuracy()) {
            System.out.println("逻辑回归较优");
            bestLr.transform(udfBatchOp).print("逻辑回归较优:");
        } else {
            System.out.println("随机森林较优");
            bestRf.transform(udfBatchOp).print("随机森林较优:");
        }
    }

    /** Builds a multi-class evaluator keyed on the binarized label and detail column. */
    private static EvalMultiClassBatchOp newEvaluator() {
        return new EvalMultiClassBatchOp()
                .setLabelCol("new_label")
                .setPredictionDetailCol("pred_detail");
    }

    /**
     * Binarizes the continuous lpsa label: values strictly greater than 2 map to
     * class 1, everything else (including 2 itself) to class 0. The name is
     * historical — it performs thresholding, not substring extraction — but is
     * kept because it is part of the public interface.
     */
    public static class SubstringFunction extends ScalarFunction {
        public Integer eval(Double label) {
            if (label == null) {
                // Propagate nulls instead of throwing NPE on missing labels.
                return null;
            }
            return label > 2 ? 1 : 0;
        }
    }

}
