package com.shujia.train;

import com.alibaba.alink.common.MLEnvironmentFactory;
import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.dataproc.SplitBatchOp;
import com.alibaba.alink.operator.batch.evaluation.EvalBinaryClassBatchOp;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.common.evaluation.BinaryClassMetrics;
import com.alibaba.alink.pipeline.Pipeline;
import com.alibaba.alink.pipeline.PipelineModel;
import com.alibaba.alink.pipeline.classification.NaiveBayesTextClassifier;
import com.alibaba.alink.pipeline.nlp.DocCountVectorizer;
import com.alibaba.alink.pipeline.nlp.Segment;
import com.alibaba.alink.pipeline.nlp.StopWordsRemover;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.configuration.Configuration;

/**
 * Trains a naive-Bayes text classifier with Alink on top of a Flink batch job.
 *
 * <p>Flow: read "label&lt;TAB&gt;text" rows from {@code data/train.txt}, split 80/20
 * into train/test, run a Segment → StopWordsRemover → DocCountVectorizer →
 * NaiveBayesTextClassifier pipeline, evaluate on the test split, save the model
 * to {@code data/model}, and print binary-classification metrics.
 */
public class NaiveBayes {

    /** Schema of the CSV source: numeric label followed by the raw text column. */
    private static final String SCHEMA_STR = "label double, text string";

    public static void main(String[] args) throws Exception {
        // Obtain the Flink batch execution environment via Alink's ML environment factory.
        ExecutionEnvironment env = MLEnvironmentFactory.getDefault().getExecutionEnvironment();

        // Raise the Akka ask timeout so long-running batch stages do not time out.
        Configuration configuration = env.getConfiguration();
        configuration.setString("akka.ask.timeout", "1000 s");

        // Source: tab-separated file matching SCHEMA_STR.
        BatchOperator<?> data = new CsvSourceBatchOp()
                .setFilePath("data/train.txt")
                .setSchemaStr(SCHEMA_STR)
                .setFieldDelimiter("\t");

        // Split the data: the main output of SplitBatchOp is the sampled 80%
        // (training set); side output 0 is the remaining 20% (test set).
        SplitBatchOp splitBatchOp = new SplitBatchOp().setFraction(0.8);
        splitBatchOp.linkFrom(data);
        BatchOperator<?> testData = splitBatchOp.getSideOutput(0);

        // NOTE(review): each count() triggers a separate batch job execution —
        // fine for a demo, but costly on large datasets.
        System.out.println("训练集数据量：" + splitBatchOp.count());
        System.out.println("测试集数据量：" + testData.count());

        /*
         * Pipeline stages:
         *   Segment                  - Chinese word segmentation (jieba-based)
         *   StopWordsRemover         - removes stop words from the token stream
         *   DocCountVectorizer       - turns tokens into a document-count vector
         *                              (original note claimed TF-IDF; the default
         *                              feature type is word count — verify with
         *                              setFeatureType if TF-IDF is intended)
         *   NaiveBayesTextClassifier - naive-Bayes text classification
         */
        Pipeline pipeline = new Pipeline()
                .add(new Segment().setSelectedCol("text"))
                .add(new StopWordsRemover().setSelectedCol("text"))
                .add(new DocCountVectorizer().setSelectedCol("text"))
                .add(new NaiveBayesTextClassifier()
                        .setPredictionCol("prediction")
                        .setPredictionDetailCol("predictionDetail")
                        .setVectorCol("text")
                        .setLabelCol("label")
                );

        // Fit the pipeline on the training split.
        PipelineModel model = pipeline.fit(splitBatchOp);

        // Score the held-out test split.
        BatchOperator<?> transform = model.transform(testData);

        // Binary-classification evaluation; collectMetrics() forces execution
        // of the evaluation job and materializes the metrics locally.
        BinaryClassMetrics metrics = new EvalBinaryClassBatchOp()
                .setLabelCol("label")
                .setPredictionDetailCol("predictionDetail")
                .linkFrom(transform)
                .collectMetrics();

        // Persist the fitted model; the save sink is lazy and runs on execute().
        model.save("data/model");

        // Launch the remaining (model-save) batch job.
        BatchOperator.execute();

        System.out.println("AUC:" + metrics.getAuc());
        System.out.println("KS:" + metrics.getKs());
        System.out.println("PRC:" + metrics.getPrc());
        System.out.println("Accuracy:" + metrics.getAccuracy()); // overall accuracy
        System.out.println("Macro Precision:" + metrics.getMacroPrecision());
        System.out.println("Micro Recall:" + metrics.getMicroRecall());
        System.out.println("Weighted Sensitivity:" + metrics.getWeightedSensitivity());
    }
}
