package com.mango.ch15;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;

/**
 * 解决流程
 * mapper接受一个微博，然后规范化文本
 * 1 查找其中的关键字，如 obama 或者 romney
 * 2 统计文本中的正面和负面关键字个数
 * 3 然后用正面词汇比例减去负面词汇比例
 * <p>
 * <p>
 * 正面词汇集和负面词汇集的数据可以在初始化 job 时存入分布式缓存（DistributedCache）
 * 符号链接
 * <p>
 * 每个存储在HDFS中的文件被放到缓存中后都可以通过一个符号链接使用。
 * <p>
 * URI hdfs://namenode/test/input/file1#myfile 你可以在程序中直接使用myfile来访问 file1这个文件。 myfile是一个符号链接文件。
 */
public class EmotionJob extends Configured implements Tool {
    // HDFS path of the input tweets; empty unless overridden (see run()).
    private static String input = "";
    // HDFS path for the job's output directory; empty unless overridden.
    private static String output = "";
    // Path to the initial sentiment model (positive/negative word lists).
    // NOTE(review): never read anywhere in this file — presumably reserved
    // for loading the lexicons; confirm before removing.
    private static String initModel = "";

    /**
     * Reads the cached word-list file through its DistributedCache symbolic
     * link and prints every line to stdout (a smoke test for the cache setup).
     *
     * <p>run() registers the cache file with the fragment {@code #mail.py},
     * so the framework creates a symlink named {@code mail.py} in the task's
     * local working directory. {@link FileReader} only understands local
     * paths, so the file must be opened as {@code "mail.py"} — the original
     * {@code "hdfs://mail.py"} resolved to a nonexistent local path.
     *
     * @throws Exception if the symlink is missing or the file cannot be read
     */
    public static void UseDistributedCacheBySymbolicLink() throws Exception {
        // try-with-resources: the original leaked the reader whenever
        // readLine() threw, because close() was not in a finally block.
        try (BufferedReader br = new BufferedReader(new FileReader("mail.py"))) {
            String line;
            while ((line = br.readLine()) != null) {
                System.out.println(line);
            }
        }
    }

    /**
     * Configures and submits the sentiment-analysis MapReduce job.
     *
     * <p>The word-list file is added to the DistributedCache with a
     * {@code #mail.py} fragment, so each task can open it through the local
     * symlink {@code mail.py} regardless of its real HDFS location.
     *
     * @param args optional paths: args[0] = input, args[1] = output; falls
     *             back to the static {@code input}/{@code output} fields so
     *             existing callers keep working
     * @return 0 when the job succeeds, 1 otherwise
     * @throws Exception if job setup or submission fails
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        // Ask the framework to create symlinks in the task working directory
        // for every cached file that carries a "#linkname" fragment.
        DistributedCache.createSymlink(conf);
        String path = "/tmp/test/mail.py";
        Path filePath = new Path(path);
        // "#mail.py" names the symlink the tasks will see locally.
        String uriWithLink = filePath.toUri().toString() + "#" + "mail.py";
        DistributedCache.addCacheFile(new URI(uriWithLink), conf);

        // The original ignored args entirely and always used the empty static
        // fields, which made the job fail on an invalid Path. Command-line
        // paths now take precedence; the fields remain the fallback.
        String inputPath = args.length > 0 ? args[0] : input;
        String outputPath = args.length > 1 ? args[1] : output;

        Job job = Job.getInstance(conf);
        job.setJarByClass(this.getClass());
        job.setMapperClass(EMapper.class);
        job.setReducerClass(EReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DoubleWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Propagate the job's outcome instead of unconditionally returning 0.
        boolean success = job.waitForCompletion(true);
        if (success)
            System.out.println("Job 运行结束");
        return success ? 0 : 1;
    }

    /**
     * Maps one tweet record ("date TAB text") to one pair
     * ("date,candidate", positiveRatio - negativeRatio) for every tracked
     * candidate mentioned in the (lower-cased) text.
     */
    static class EMapper extends Mapper<LongWritable, Text, Text, DoubleWritable> {
        // Sentiment lexicons; meant to be loaded from the DistributedCache in
        // setup(). NOTE(review): loading is still a stub, so both stay null
        // and the counters below return 0 — confirm intended.
        private Set<String> positiveWords = null;
        private Set<String> negativeWords = null;
        // Candidate keywords; stored lower-case, matched against lower-cased tokens.
        private Set<String> allCandidates = new HashSet<>();

        /**
         * Initializes the candidate set and smoke-tests the DistributedCache
         * symlink. Cache failures are logged but do not abort the task.
         */
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            allCandidates.add("obama");
            allCandidates.add("romney");
            System.out.println("Now, use the distributed cache and syslink");

            try {
                UseDistributedCacheBySymbolicLink();
            } catch (Exception e) {
                // Best-effort: the job can still run without the cached file.
                e.printStackTrace();
            }
        }

        /**
         * @param key     byte offset of the line (date is carried in the value)
         * @param value   one record: tweet date, a TAB, then the tweet text
         * @param context emits ("date,candidate", sentiment ratio difference)
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] parts = value.toString().split("\t");
            // Guard: the original threw ArrayIndexOutOfBoundsException on any
            // line without a TAB separator; skip malformed records instead.
            if (parts.length < 2) {
                return;
            }
            String date = parts[0];
            // Normalize case so "Obama"/"ROMNEY" match the lower-case
            // candidate keywords (the class contract promises normalization).
            String[] tokens = parts[1].toLowerCase(Locale.ROOT).split(" ");
            List<String> toks = Arrays.asList(tokens);

            for (String candidate : allCandidates) {
                if (toks.contains(candidate)) {
                    int positiveCount = getPositiveWordsCount(toks);
                    int negativeCount = getNegativeWordsCount(toks);
                    // Cast to double before dividing: the original int/int
                    // division truncated both ratios to 0 for every tweet.
                    double positiveRatio = (double) positiveCount / toks.size();
                    double negativeRatio = (double) negativeCount / toks.size();
                    Text outKey = new Text(date.concat(",").concat(candidate));
                    DoubleWritable outValue = new DoubleWritable(positiveRatio - negativeRatio);
                    context.write(outKey, outValue);
                }
            }
        }

        /**
         * Counts how many tokens of this line appear in the positive lexicon.
         *
         * @param toks tokens of one tweet
         * @return the count, or 0 while the lexicon has not been loaded
         */
        private int getPositiveWordsCount(List<String> toks) {
            return countMatches(toks, positiveWords);
        }

        /**
         * Counts how many tokens of this line appear in the negative lexicon.
         *
         * @param toks tokens of one tweet
         * @return the count, or 0 while the lexicon has not been loaded
         */
        private int getNegativeWordsCount(List<String> toks) {
            return countMatches(toks, negativeWords);
        }

        /** Shared helper: tokens present in {@code lexicon}; 0 when it is null. */
        private int countMatches(List<String> toks, Set<String> lexicon) {
            if (lexicon == null) {
                return 0;
            }
            int count = 0;
            for (String tok : toks) {
                if (lexicon.contains(tok)) {
                    count++;
                }
            }
            return count;
        }
    }

    /**
     * Averages the per-tweet sentiment ratios for each (date, candidate) key
     * and labels the mean as positive, neutral, or negative.
     */
    static class EReducer extends Reducer<Text, DoubleWritable, Text, Text> {
        /**
         * @param key     pair "yyyy-MM-dd,candidate"
         * @param values  sentiment ratio differences, one per matching tweet
         * @param context emits (key, "mean <label>")
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void reduce(Text key, Iterable<DoubleWritable> values, Context context) throws IOException, InterruptedException {
            double total = 0.0;
            int count = 0; // number of tweets contributing to this key
            for (DoubleWritable ratio : values) {
                count++;
                total += ratio.get();
            }

            // Sign of the sum decides the label; the emitted value is the mean.
            double mean = total / count;
            String label;
            if (total > 0) {
                label = " 是正面的";
            } else if (total == 0) {
                label = " 是中立的";
            } else {
                label = " 是负面的";
            }
            context.write(key, new Text(mean + label));
        }
    }

    /**
     * Driver entry point: runs the job via ToolRunner (which also parses
     * generic Hadoop options such as -D and -conf) and exits with the job's
     * status so shell scripts and schedulers can detect failure.
     *
     * @param args optional: args[0] = input path, args[1] = output path
     */
    public static void main(String[] args) {
        try {
            // The original discarded the return value, so a failed job still
            // exited with status 0; propagate it instead.
            int exitCode = ToolRunner.run(new Configuration(), new EmotionJob(), args);
            System.exit(exitCode);
        } catch (Exception e) {
            System.out.println("Job 运行失败");
            e.printStackTrace();
            System.exit(1);
        }
    }
}
