package com.csw.alink;

import com.alibaba.alink.common.MLEnvironmentFactory;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalBinaryClassBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.common.evaluation.BinaryClassMetrics;
import com.alibaba.alink.pipeline.Pipeline;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.NaiveBayesTextClassifier;
import com.alibaba.alink.pipeline.nlp.DocCountVectorizer;
import com.alibaba.alink.pipeline.nlp.Segment;
import com.alibaba.alink.pipeline.nlp.StopWordsRemover;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.configuration.Configuration;

/**
 * Alink batch demo: trains a Naive Bayes text classifier on a tab-separated
 * CSV file ({@code label DOUBLE, text STRING}), evaluates it on a held-out
 * split, prints binary-classification metrics, and saves the fitted pipeline
 * model to disk.
 *
 * <p>NOTE(review): the class name looks like a typo for "ReadData", but it is
 * kept as-is because renaming would break external references.
 */
public class Demo01ReafData {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = MLEnvironmentFactory.getDefault().getExecutionEnvironment();

        Configuration configuration = env.getConfiguration();
        // Raise the Akka ask timeout (ms) so long-running batch jobs do not fail
        // on slow network links.
        configuration.setString("akka.ask.timeout", "100000");

        // Raw training data: tab-delimited file with a numeric label and a text column.
        CsvSourceBatchOp csvSourceBatchOp = new CsvSourceBatchOp()
                .setFilePath("alink/data/train.txt")
                .setSchemaStr("label DOUBLE, text STRING")
                .setFieldDelimiter("\t");

        // Split the data into a training set (70%) and a test set (30%).
        SplitBatchOp splitBatchOp = new SplitBatchOp()
                .setFraction(0.7)
                .linkFrom(csvSourceBatchOp);

        // Main output of SplitBatchOp is the sampled fraction (training set);
        // side output 0 is the remainder (test set).
        BatchOperator<?> trainData = splitBatchOp;
        BatchOperator<?> testData = splitBatchOp.getSideOutput(0);

        System.out.println(trainData.count());
        System.out.println(testData.count());

        // Pipeline: chain feature engineering and model training into one unit.
        Pipeline pipeline = new Pipeline()
                // Feature engineering
                .add(new Segment().setSelectedCol("text"))          // Chinese word segmentation
                .add(new StopWordsRemover().setSelectedCol("text")) // drop stop words
                .add(new DocCountVectorizer().setSelectedCol("text")) // text -> count vector

                // Model
                .add(new NaiveBayesTextClassifier()
                        .setPredictionCol("predictionCol")
                        .setPredictionDetailCol("detailInput")
                        .setVectorCol("text")
                        .setLabelCol("label")
                );

        // Fit the pipeline on the training data.
        PipelineModel model = pipeline.fit(trainData);

        // Run the fitted model over the test set to get predictions.
        BatchOperator<?> transform = model.transform(testData);

        // Evaluate binary-classification quality from the prediction detail column.
        BinaryClassMetrics metrics = new EvalBinaryClassBatchOp()
                .setLabelCol("label")
                .setPredictionDetailCol("detailInput")
                .linkFrom(transform)
                .collectMetrics();

        System.out.println("AUC:" + metrics.getAuc());
        System.out.println("KS:" + metrics.getKs());
        System.out.println("PRC:" + metrics.getPrc());
        System.out.println("Accuracy:" + metrics.getAccuracy()); // model accuracy
        System.out.println("Macro Precision:" + metrics.getMacroPrecision());
        System.out.println("Micro Recall:" + metrics.getMicroRecall());
        System.out.println("Weighted Sensitivity:" + metrics.getWeightedSensitivity());

        // Persist the fitted pipeline model.
        model.save("alink/data/model");

        // Trigger execution of the deferred batch job (including the save above).
        BatchOperator.execute();
    }
}
