package com.bw;

import com.alibaba.alink.common.io.filesystem.FilePath;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalBinaryClassBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalRegressionBatchOp;
import com.alibaba.alink.operator.batch.sink.AkSinkBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.batch.sql.SelectBatchOp;
import com.alibaba.alink.operator.batch.utils.UDFBatchOp;
import com.alibaba.alink.operator.common.evaluation.BinaryClassMetrics;
import com.alibaba.alink.operator.common.evaluation.RegressionMetrics;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.LogisticRegression;
import com.alibaba.alink.pipeline.classification.LogisticRegressionModel;
import com.alibaba.alink.pipeline.dataproc.MinMaxScaler;
import com.alibaba.alink.pipeline.dataproc.MinMaxScalerModel;
import com.alibaba.alink.pipeline.regression.LinearRegression;
import com.alibaba.alink.pipeline.regression.LinearRegressionModel;
import com.alibaba.alink.pipeline.regression.RidgeRegression;
import com.alibaba.alink.pipeline.regression.RidgeRegressionModel;
import com.alibaba.alink.pipeline.tuning.BinaryClassificationTuningEvaluator;
import com.alibaba.alink.pipeline.tuning.GridSearchCV;
import com.alibaba.alink.pipeline.tuning.GridSearchCVModel;
import com.alibaba.alink.pipeline.tuning.ParamGrid;
import org.apache.flink.table.functions.ScalarFunction;

public class Test1 {
    /**
     * End-to-end Alink batch job over a leukemia data set.
     *
     * <p>Pipeline: load the space-delimited CSV, drop the id-like column,
     * min-max scale the features, derive a binary class label, split 80/20
     * into train/test, fit a linear regression (regression metrics printed),
     * fit and grid-search-tune a logistic regression (binary-classification
     * metrics printed, KS / PR curves exported as images), then save the best
     * tuned model and its predictions on the test set.
     *
     * @param args unused
     * @throws Exception propagated from Alink/Flink batch execution
     */
    public static void main(String[] args) throws Exception {
        // 1. Load the leukemia data set and preprocess it: feature scaling
        //    and a train/test split of features and target. (5 pts)
        BatchOperator.setParallelism(1);
        // NOTE(review): "lable" (sic) is the original column name; it is kept
        // unchanged because it is a runtime identifier referenced consistently
        // throughout this job.
        String schemaStr = "f1 int,f2 double,f3 double,f4 double,f5 double,f6 double,"
                + "f7 double,f8 double,f9 double,f10 double,f11 double,lable int";

        CsvSourceBatchOp csv = new CsvSourceBatchOp()
                .setFilePath("datafile/白血病数据集.txt")
                .setSchemaStr(schemaStr)
                .setIgnoreFirstLine(true)
                .setFieldDelimiter(" "); // space-delimited, NOT comma

        // Drop the f1 column (not used as a feature).
        BatchOperator<?> selectData = new SelectBatchOp()
                .setClause("f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,lable")
                .linkFrom(csv);
        selectData.print();

        // Feature columns and target column.
        String[] features = new String[]{"f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11"};
        String label = "lable";

        // Min-max normalization of the feature columns (fit on the full
        // selected data, then applied in place over the same columns).
        MinMaxScalerModel model = new MinMaxScaler()
                .setSelectedCols(features)
                .fit(selectData);
        BatchOperator<?> data = model.transform(selectData);

        // Derive a binary class column "new_lable" from the numeric target
        // via the UDF below (>= 100 maps to 1, otherwise 0).
        UDFBatchOp udfBatchOp = new UDFBatchOp()
                .setFunc(new MyScalarFunction())
                .setSelectedCols("lable")
                .setOutputCol("new_lable")
                .linkFrom(data);

        // 80/20 split: the operator's main output is the 80% training set,
        // side output 0 is the remaining 20% test set.
        BatchOperator<?> spliter = new SplitBatchOp().setFraction(0.8);
        spliter.linkFrom(udfBatchOp);
        BatchOperator<?> trainData = spliter;                 // training set
        BatchOperator<?> testData = spliter.getSideOutput(0); // test set

        System.out.println("训练集:" + trainData.count());
        System.out.println("测试集:" + testData.count());
        testData.print();

        // 2. Two self-chosen regression algorithms with initialized
        //    parameters. (5 pts) Ridge regression was the second candidate;
        //    linear regression is the one evaluated here.

        // Linear regression on the raw numeric target.
        LinearRegression lr = new LinearRegression()
                .setFeatureCols(features)
                .setLabelCol(label)
                .setNumThreads(50) // NOTE(review): 50 threads with parallelism 1 — confirm intended
                .setPredictionCol("pred");
        LinearRegressionModel lr_model = lr.fit(trainData);
        BatchOperator<?> lr_result = lr_model.transform(trainData);

        // Regression metrics on the training predictions.
        RegressionMetrics metrics = new EvalRegressionBatchOp()
                .setPredictionCol("pred")
                .setLabelCol(label)
                .linkFrom(lr_result)
                .collectMetrics();
        System.out.println("Total Samples Number:" + metrics.getCount());
        System.out.println("SSE:" + metrics.getSse());
        System.out.println("SAE:" + metrics.getSae());
        System.out.println("RMSE:" + metrics.getRmse());
        System.out.println("R2:" + metrics.getR2());

        // Logistic regression — binary classification on "new_lable".
        LogisticRegression lg = new LogisticRegression()
                .setFeatureCols(features)
                .setLabelCol("new_lable")
                .setPredictionCol("pred")
                .setMaxIter(20)
                .setNumThreads(1)
                .setPredictionDetailCol("pred_detail")
                .enableLazyPrintModelInfo();

        LogisticRegressionModel lg_model = lg.fit(trainData);
        BatchOperator<?> lg_result = lg_model.transform(testData);

        // 3. Hyper-parameter tuning with grid search + 2-fold CV, printing
        //    intermediate tuning results lazily. (5 pts)
        ParamGrid paramGrid = new ParamGrid()
                .addGrid(lg, LogisticRegression.MAX_ITER, new Integer[] {2, 3, 4})
                .addGrid(lg, LogisticRegression.L_2, new Double[] {0.2, 0.6, 0.9});
        BinaryClassificationTuningEvaluator tuningEvaluator = new BinaryClassificationTuningEvaluator()
                .setLabelCol("new_lable")
                .setPredictionDetailCol("pred_detail")
                .setTuningBinaryClassMetric("AUC");
        GridSearchCV cv = new GridSearchCV()
                .setEstimator(lg)
                .setParamGrid(paramGrid)
                .setTuningEvaluator(tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");
        GridSearchCVModel gv_model = cv.fit(trainData);
        PipelineModel base_model = gv_model.getBestPipelineModel();

        // 4. Evaluate model fit with several metrics (precision, recall,
        //    AUC, ...). (10 pts) Evaluated on the untuned model's test-set
        //    predictions (lg_result).
        BinaryClassMetrics lg_metrics = new EvalBinaryClassBatchOp()
                .setLabelCol("new_lable")
                .setPredictionDetailCol("pred_detail")
                .linkFrom(lg_result)
                .collectMetrics();
        System.out.println("AUC:" + lg_metrics.getAuc());
        System.out.println("KS:" + lg_metrics.getKs());
        System.out.println("PRC:" + lg_metrics.getPrc());
        System.out.println("Accuracy:" + lg_metrics.getAccuracy());
        System.out.println("Macro Precision:" + lg_metrics.getMacroPrecision());
        System.out.println("Micro Recall:" + lg_metrics.getMicroRecall());
        System.out.println("Weighted Sensitivity:" + lg_metrics.getWeightedSensitivity());

        // 5. Visualize the fit with Alink (KS and precision-recall curves),
        //    then predict with the best tuned model and save results. (5 pts)
        lg_metrics.saveKSAsImage("datafile/ks.jpg", true);
        lg_metrics.savePrecisionRecallCurveAsImage("datafile/pre.jpg", true);

        // Persist the best tuned pipeline model.
        base_model.save("datafile/yk09_bast_model");
        // Predict the test set with the best model and sink to an .ak file.
        BatchOperator<?> result = base_model.transform(testData);
        result.link(new AkSinkBatchOp()
                .setFilePath(new FilePath("datafile/yk09_result.ak"))
                .setOverwriteSink(true)
                .setNumFiles(1));
        // Triggers the whole deferred batch job, including lazy prints and sinks.
        BatchOperator.execute();
    }

    /**
     * UDF that binarizes the numeric "lable" column: values >= 100 become
     * class 1, everything else class 0.
     */
    public static class MyScalarFunction extends ScalarFunction {
        /**
         * @param ev raw integer target value
         * @return 1 if {@code ev >= 100}, else 0
         */
        public Integer eval(int ev) {
            return ev >= 100 ? 1 : 0;
        }
    }
}
