package yuekao3.machine;

import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalBinaryClassBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.common.evaluation.BinaryClassMetrics;
import com.alibaba.alink.operator.common.evaluation.TuningBinaryClassMetric;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.DecisionTreeClassificationModel;
import com.alibaba.alink.pipeline.classification.DecisionTreeClassifier;
import com.alibaba.alink.pipeline.classification.LogisticRegression;
import com.alibaba.alink.pipeline.classification.LogisticRegressionModel;
import com.alibaba.alink.pipeline.dataproc.vector.VectorAssembler;
import com.alibaba.alink.pipeline.feature.OneHotEncoder;
import com.alibaba.alink.pipeline.feature.OneHotEncoderModel;
import com.alibaba.alink.pipeline.tuning.BinaryClassificationTuningEvaluator;
import com.alibaba.alink.pipeline.tuning.GridSearchCV;
import com.alibaba.alink.pipeline.tuning.ParamGrid;

public class Titanic {
    /**
     * Titanic survival classification exercise built on Alink batch operators.
     *
     * Steps: (1) load train/test CSV data sets and print row counts,
     * (2) feature engineering (one-hot encoding + vector assembly),
     * (3) 80/20 train/validate split, (4) train logistic-regression and
     * decision-tree classifiers, (5) evaluate Accuracy / PRC / AUC on the
     * validation split, (6) grid-search hyperparameter tuning and model
     * persistence, (7) predict survival on the engineered test set with the
     * best models.
     *
     * @param args unused
     * @throws Exception propagated from Alink job execution / file I/O
     */
    public static void main(String[] args) throws Exception {
        // (1) Load the raw test and train data sets; print their row counts.
        String filePath = "data/yk3/test.csv";
        String schema
            //PassengerId,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked
            // NOTE(review): "Ticket int" looks suspicious — Titanic ticket values
            // often contain letters; setLenient(true) would mask parse failures.
            // Confirm against the actual CSV.
                = "PassengerId int,Pclass int,Name String,Sex String,Age double,SibSp int,Parch int,Ticket int,Fare double,Cabin string,Embarked string";
        CsvSourceBatchOp testcsvSource = new CsvSourceBatchOp()
                .setFilePath(filePath)
                .setSchemaStr(schema)
                .setFieldDelimiter(",")
                .setSkipBlankLine(true)
                .setLenient(true)
                .setIgnoreFirstLine(true);

        String filePath1 = "data/yk3/train.csv";
        String schema1
                // Train data has the extra "Survived" label column.
                //PassengerId,Survived,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked
                = "PassengerId int,Survived int,Pclass int,Name String,Sex String,Age double,SibSp int,Parch int,Ticket int,Fare double,Cabin string,Embarked string";
        CsvSourceBatchOp traincsvSource = new CsvSourceBatchOp()
                .setFilePath(filePath1)
                .setSchemaStr(schema1)
                .setFieldDelimiter(",")
                .setSkipBlankLine(true)
                .setLenient(true)
                .setIgnoreFirstLine(true);

        System.out.println(testcsvSource.count());
        System.out.println(traincsvSource.count());

        // (2) Feature engineering: one-hot encode the categorical columns into
        // "fec", then assemble "fec" + Fare into a single vector column "vec".
        String[] feature = new String[]{"Name", "Sex", "Fare", "Cabin", "Embarked"};
        String label = "Survived";

        OneHotEncoder one_hot = new OneHotEncoder()
                .setSelectedCols("Name", "Sex", "Cabin", "Embarked")
                .setOutputCols("fec");

        // Fix: fit the encoder ONCE on the training data and apply the same
        // fitted model to both data sets, so train and test share one category
        // mapping. (The original fitted a second, incompatible encoder on the
        // test set and then never used its output.)
        OneHotEncoderModel oneHotModel = one_hot.fit(traincsvSource);
        BatchOperator<?> transform1 = oneHotModel.transform(traincsvSource);
        BatchOperator<?> transform2 = oneHotModel.transform(testcsvSource);

        VectorAssembler res = new VectorAssembler()
                .setSelectedCols("fec", "Fare")
                .setOutputCol("vec");

        BatchOperator<?> transform = res.transform(transform1);
        // Test-set features engineered with the same assembler; used in step (7).
        BatchOperator<?> testFeatures = res.transform(transform2);

        // (3) Split the engineered training data 80/20 into trainData /
        // validateData and print both counts.
        BatchOperator<?> spliter = new SplitBatchOp().setFraction(0.8);
        BatchOperator<?> trainData = spliter.linkFrom(transform);
        BatchOperator<?> validateData = spliter.getSideOutput(0);
        System.out.println(trainData.count() + "-:-" + validateData.count());

        // (4) Train two classifiers on trainData.
        // Logistic regression over the assembled vector column.
        LogisticRegression lr = new LogisticRegression()
                .setVectorCol("vec")
                .setLabelCol(label)
                .setPredictionDetailCol("pred_detail")
                .setPredictionCol("pred")
                .setMaxIter(1)
                .setNumThreads(1);
        // Fix: fit each model once and reuse it. The original called fit()
        // twice per algorithm, launching a redundant training job each time.
        LogisticRegressionModel lrModel = lr.fit(trainData);
        lrModel.getModelData().print("_____");

        // Decision tree over the raw feature columns.
        DecisionTreeClassifier dtc = new DecisionTreeClassifier()
                .setPredictionDetailCol("pred_detail")
                .setPredictionCol("pred")
                .setLabelCol(label)
                .setFeatureCols(feature)
                .setMaxBins(128)
                .setMaxDepth(1);
        DecisionTreeClassificationModel dtcModel = dtc.fit(trainData);
        dtcModel.getModelData().print("-----");

        BatchOperator<?> lr_operator = lrModel.transform(validateData);
        BatchOperator<?> dtc_operator = dtcModel.transform(validateData);

        // (5) Evaluate both models on validateData: PRC area, Accuracy, AUC.
        EvalBinaryClassBatchOp metrics = new EvalBinaryClassBatchOp()
                .setLabelCol(label)
                .setPredictionDetailCol("pred_detail");

        BinaryClassMetrics lr_binaryClassMetrics = metrics.linkFrom(lr_operator).collectMetrics();
        System.out.println("PRC:" + lr_binaryClassMetrics.getPrc());
        System.out.println("Accuracy:" + lr_binaryClassMetrics.getAccuracy());
        System.out.println("Auc:" + lr_binaryClassMetrics.getAuc());

        BinaryClassMetrics dtc_binaryClassMetrics = metrics.linkFrom(dtc_operator).collectMetrics();
        System.out.println("PRC:" + dtc_binaryClassMetrics.getPrc());
        System.out.println("Accuracy:" + dtc_binaryClassMetrics.getAccuracy());
        System.out.println("Auc:" + dtc_binaryClassMetrics.getAuc());

        // (6) Hyperparameter tuning: 2-fold grid search per algorithm, scored
        // by accuracy; keep each best pipeline and save it locally.
        BinaryClassificationTuningEvaluator tuningEvaluator = new BinaryClassificationTuningEvaluator()
                .setLabelCol(label)
                .setPredictionDetailCol("pred_detail")
                .setTuningBinaryClassMetric(TuningBinaryClassMetric.ACCURACY);

        ParamGrid paramGrid1 = new ParamGrid()
                .addGrid(lr, LogisticRegression.L_1, new Double[] {1.0, 0.99, 0.98})
                .addGrid(lr, LogisticRegression.MAX_ITER, new Integer[] {3, 6, 9});

        GridSearchCV cv1 = new GridSearchCV()
                .setEstimator(lr)
                .setParamGrid(paramGrid1)
                .setTuningEvaluator(tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");

        PipelineModel lr_bestPipelineModel = cv1.fit(trainData).getBestPipelineModel();

        ParamGrid paramGrid2 = new ParamGrid()
                .addGrid(dtc, DecisionTreeClassifier.MAX_BINS, new Integer[] {1, 2, 3})
                .addGrid(dtc, DecisionTreeClassifier.MAX_DEPTH, new Integer[] {3, 6, 9});

        GridSearchCV cv2 = new GridSearchCV()
                .setEstimator(dtc)
                .setParamGrid(paramGrid2)
                .setTuningEvaluator(tuningEvaluator)
                .setNumFolds(2)
                .enableLazyPrintTrainInfo("TrainInfo");

        PipelineModel dtc_bestPipelineModel = cv2.fit(trainData).getBestPipelineModel();

        lr_bestPipelineModel.save("data/yk3/lr_test.csv");
        dtc_bestPipelineModel.save("data/yk3/dtc_test.csv");

        // (7) Predict survival on the feature-engineered TEST set with each
        // best model and print the results. Fix: the original transformed
        // trainData here, contradicting its own requirement comment.
        BatchOperator<?> transform3 = lr_bestPipelineModel.transform(testFeatures);
        transform3.print();

        BatchOperator<?> transform4 = dtc_bestPipelineModel.transform(testFeatures);
        transform4.print();
    }
}
