package com.mango.ch14;

import com.mango.ch05.PairOfWords;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.broadcast.Broadcast;
import scala.Tuple2;

import java.util.*;

public class Spark_NBCJob {
    // Training data: one CSV record per line; the LAST field is the class label.
    private static String initClassifyFile = "d:/HadoopData/input/f14";
    // Unlabeled records to classify, same CSV layout (attributes only).
    private static String input = "d:/HadoopData/input/f14-2";
    private static String output = "d:/HadoopData/output";

    /**
     * Trains a Naive Bayes classifier from {@code initClassifyFile}, broadcasts
     * the probability table, classifies the records in {@code input}, and writes
     * intermediate and final results under {@code output}.
     */
    public static void main(String[] args) {
        SparkConf sconf = new SparkConf();
        sconf.setAppName("NBC");
        sconf.setMaster("local");
        JavaSparkContext jsc = new JavaSparkContext(sconf);
        JavaRDD<String> train = jsc.textFile(initClassifyFile, 1);
        // Total number of training records — denominator of the class priors P(C).
        long trainSize = train.count();

        // Stage 1 (training): for every record emit ((attributeValue, class), 1)
        // per attribute, plus one (("CLASS", class), 1) marker so that class
        // frequencies are counted in the same shuffle.
        JavaPairRDD<Tuple2<String, String>, Integer> pairs = train.flatMapToPair(new PairFlatMapFunction<String, Tuple2<String, String>, Integer>() {
            @Override
            public Iterator<Tuple2<Tuple2<String, String>, Integer>> call(String s) throws Exception {
                List<Tuple2<Tuple2<String, String>, Integer>> result = new ArrayList<>();
                String[] tokens = s.split(",");
                // e.g. Sunny,Hot,High,Weak,No -> attributes..., class label
                int classificationIndex = tokens.length - 1; // index of the class label
                String theClassification = tokens[classificationIndex];
                // BUG FIX: was `i < classificationIndex - 1`, which silently
                // dropped the last attribute of every training record.
                for (int i = 0; i < classificationIndex; i++) {
                    result.add(new Tuple2<>(new Tuple2<>(tokens[i], theClassification), 1));
                }
                result.add(new Tuple2<>(new Tuple2<>("CLASS", theClassification), 1));
                return result.iterator();
            }
        });
        pairs.saveAsTextFile(output + "/1");

        // Reduce the training pairs so each (attribute, class) / ("CLASS", class)
        // key carries its total occurrence count.
        JavaPairRDD<Tuple2<String, String>, Integer> counts = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        counts.saveAsTextFile(output + "/2");

        // Convert raw counts into the probability table:
        //   ("CLASS", c)  -> P(c)          (prior)
        //   (attr, c)     -> P(attr | c)   (conditional)
        Map<Tuple2<String, String>, Integer> collectAsMap = counts.collectAsMap();
        Map<Tuple2<String, String>, Double> pt = getPT(collectAsMap, trainSize);
        // Persist the classifier (intended for HDFS; currently local output only).
        JavaPairRDD<PairOfWords, DoubleWritable> pairOfWordsDoubleWritableJavaPairRDD = jsc.parallelizePairs(toWritableList(pt));
//        pairOfWordsDoubleWritableJavaPairRDD.saveAsTextFile(output+"/4");

        // Stage 2 (prediction): broadcast the probability table so every
        // executor can classify records locally.
        Broadcast<Map<Tuple2<String, String>, Double>> ptBroadcast = jsc.broadcast(pt);

        JavaRDD<String> newLines = jsc.textFile(input);
        JavaPairRDD<String, String> clasfied = newLines.mapToPair(new PairFunction<String, String, String>() {
            @Override
            public Tuple2<String, String> call(String s) throws Exception {
                Map<Tuple2<String, String>, Double> pts = ptBroadcast.getValue();
                String[] tokens = s.split(",");
                // Candidate labels come from the trained model instead of a
                // hard-coded {"Yes","No"}, so the job handles any label set.
                Set<String> classes = new HashSet<>();
                for (Tuple2<String, String> k : pts.keySet()) {
                    if ("CLASS".equals(k._1())) {
                        classes.add(k._2());
                    }
                }
                // BUG FIX: the original indexed tokens by the CLASS index,
                // reset the running product to 1 on every token, never used the
                // class prior, and compared scores per-token. Correct NB scoring
                // is: score(c) = P(c) * prod_i P(token_i | c), argmax over c.
                String bestClass = null;
                double best = -1.0; // scores are probabilities (>= 0), so any real score wins
                for (String c : classes) {
                    // Start from the class prior P(c).
                    double score = pts.get(new Tuple2<>("CLASS", c));
                    for (String token : tokens) {
                        // An (attribute, class) pair never seen in training
                        // contributes probability 0 (no Laplace smoothing —
                        // TODO consider adding it); the original NPE'd here
                        // via unboxing a null lookup instead.
                        score *= pts.getOrDefault(new Tuple2<>(token, c), 0.0);
                    }
                    if (score > best) {
                        best = score;
                        bestClass = c;
                    }
                }
                return new Tuple2<>(s, bestClass);
            }
        });
        clasfied.saveAsTextFile(output + "/3");
        System.out.println("Job done");
        jsc.close();
    }

    /**
     * Saves the probability table {@code pt} to {@code output} as a Hadoop
     * sequence file of (PairOfWords, DoubleWritable).
     *
     * <p>NOTE(review): the {@code list} parameter is ignored — the RDD is built
     * from {@code pt} via {@link #toWritableList(Map)}. Kept as-is to preserve
     * the existing signature; confirm whether {@code list} was meant to be used.
     */
    public static <K, V> void parallelizePairs(List<Tuple2<K, V>> list, Map<Tuple2<String, String>, Double> pt, JavaSparkContext jsc) {
        List<Tuple2<PairOfWords, DoubleWritable>> tuple2s = toWritableList(pt);
        JavaPairRDD<PairOfWords, DoubleWritable> pairRDD = jsc.parallelizePairs(tuple2s);
        pairRDD.saveAsHadoopFile(output, PairOfWords.class, DoubleWritable.class, SequenceFileOutputFormat.class);
    }

    /**
     * Converts the probability table into Hadoop-writable pairs so it can be
     * persisted as a sequence file.
     */
    static List<Tuple2<PairOfWords, DoubleWritable>> toWritableList(Map<Tuple2<String, String>, Double> PT) {
        List<Tuple2<PairOfWords, DoubleWritable>> list = new ArrayList<>(PT.size());
        for (Map.Entry<Tuple2<String, String>, Double> entry : PT.entrySet()) {
            list.add(new Tuple2<>(new PairOfWords(entry.getKey()._1, entry.getKey()._2), new DoubleWritable(entry.getValue())));
        }
        return list;
    }

    /**
     * Builds the probability table from raw counts.
     *
     * @param sors      counts keyed by ("CLASS", c) for class totals and
     *                  (attr, c) for attribute/class co-occurrence
     * @param trainSize total number of training records (double, so the
     *                  divisions below are floating-point, not integer)
     * @return map of ("CLASS", c) -> P(c) and (attr, c) -> P(attr | c)
     */
    static Map<Tuple2<String, String>, Double> getPT(Map<Tuple2<String, String>, Integer> sors, double trainSize) {
        Map<Tuple2<String, String>, Double> tmpMaps = new HashMap<>();
        // Pass 1: priors P(c) = count(c) / trainSize.
        for (Map.Entry<Tuple2<String, String>, Integer> entry : sors.entrySet()) {
            Tuple2<String, String> k = entry.getKey();
            if (k._1().equals("CLASS")) {
                tmpMaps.put(k, entry.getValue() / trainSize);
            }
        }
        // Pass 2: conditionals P(attr | c) = count(attr, c) / count(c).
        for (Map.Entry<Tuple2<String, String>, Integer> entry : sors.entrySet()) {
            Tuple2<String, String> k = entry.getKey();
            String classification = k._2;
            if (!k._1().equals("CLASS")) {
                Tuple2<String, String> k2 = new Tuple2<>("CLASS", classification);
                double claCount = sors.get(k2);
                tmpMaps.put(k, entry.getValue() / claCount);
            }
        }
        System.out.println("PT" + tmpMaps);
        return tmpMaps;
    }
}
