package com.example.hadoop;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class TitleCount {

    /**
     * Tokenizes sentiment-labelled CSV lines ("text,label") and emits
     * (word TAB label, 1) for every cleaned, non-stop-word token.
     */
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private final Text wordWithLabel = new Text();
        private final Set<String> stopWords = new HashSet<>();

        /**
         * Loads the stop-word list (one word per line) from the path given in
         * the "stopword.path" configuration property, if set.
         */
        @Override
        protected void setup(Context context) throws IOException {
            Configuration conf = context.getConfiguration();
            String stopwordPath = conf.get("stopword.path");
            if (stopwordPath != null) {
                Path path = new Path(stopwordPath);
                // Use an explicit charset: the platform default is not
                // guaranteed to be the same on every cluster node.
                try (BufferedReader br = new BufferedReader(new InputStreamReader(
                        path.getFileSystem(conf).open(path), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        // Locale.ROOT keeps casing locale-independent, matching
                        // the lowercased tokens produced in map().
                        stopWords.add(line.trim().toLowerCase(Locale.ROOT));
                    }
                }
            }
        }

        /**
         * Parses one input line, cleans the text column and emits one
         * (word TAB label, 1) pair per surviving token.
         */
        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {

            String line = value.toString().trim();
            if (line.isEmpty() || line.startsWith("Text")) return; // skip header row

            // The label is everything after the LAST comma, so commas inside
            // the text column are tolerated.
            int lastComma = line.lastIndexOf(',');
            if (lastComma == -1) return; // malformed row: no label column

            // Extract text and sentiment label; drop literal double quotes.
            String text = line.substring(0, lastComma).replace("\"", "");
            String label = line.substring(lastComma + 1).trim();

            // Normalize: lower-case, then replace digits/punctuation with spaces.
            text = text.toLowerCase(Locale.ROOT);
            text = text.replaceAll("[^a-z ]", " ");

            StringTokenizer itr = new StringTokenizer(text);
            while (itr.hasMoreTokens()) {
                String token = itr.nextToken().trim();
                // Drop stop words and single-character tokens (rarely meaningful).
                if (token.isEmpty() || stopWords.contains(token) || token.length() == 1) continue;
                wordWithLabel.set(token + "\t" + label);
                context.write(wordWithLabel, one);
            }
        }
    }


    /**
     * Sums the counts per (word TAB label) key and, at cleanup, writes the 100
     * most frequent words of each sentiment to the "positive"/"negative"
     * named outputs.
     *
     * NOTE(review): the per-label maps are held entirely in memory and the
     * global top-100 is only correct when the job runs with a single reducer.
     */
    public static class SumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private MultipleOutputs<Text, IntWritable> mos;
        private final Map<String, Integer> positiveMap = new HashMap<>();
        private final Map<String, Integer> negativeMap = new HashMap<>();

        @Override
        protected void setup(Context context) {
            mos = new MultipleOutputs<>(context);
        }

        /** Accumulates the summed count of each word into its label's map. */
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {

            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }

            // Key format is "word\tlabel"; anything else is silently ignored.
            String[] parts = key.toString().split("\t");
            if (parts.length != 2) return;

            String word = parts[0];
            String label = parts[1];

            if (label.equals("1")) {
                positiveMap.merge(word, sum, Integer::sum);
            } else if (label.equals("-1")) {
                negativeMap.merge(word, sum, Integer::sum);
            }
        }

        /** Sorts each map by count and emits the top 100; always closes mos. */
        @Override
        protected void cleanup(Context context)
                throws IOException, InterruptedException {
            // Guard with finally so the MultipleOutputs streams are closed
            // even if one of the writes fails.
            try {
                writeTop100(positiveMap, "positive");
                writeTop100(negativeMap, "negative");
            } finally {
                mos.close();
            }
        }

        /** Writes the 100 highest-count entries of {@code map} to the named output. */
        private void writeTop100(Map<String, Integer> map, String name)
                throws IOException, InterruptedException {
            List<Map.Entry<String, Integer>> top = map.entrySet().stream()
                    .sorted(Map.Entry.<String, Integer>comparingByValue().reversed())
                    .limit(100)
                    .collect(Collectors.toList());
            // Plain loop so IOException/InterruptedException propagate to the
            // framework instead of being swallowed inside a lambda.
            for (Map.Entry<String, Integer> entry : top) {
                mos.write(name, new Text(entry.getKey()), new IntWritable(entry.getValue()));
            }
        }
    }


    /**
     * Job driver. Usage: TitleCount &lt;input&gt; &lt;output&gt; &lt;stopword-file&gt;.
     *
     * Counts sentiment-labelled title words and writes the top-100 words per
     * sentiment via the "positive"/"negative" named outputs.
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        if (otherArgs.length != 3) {
            System.err.println("Usage: TitleCount <input> <output> <stopword-file>");
            System.exit(2);
        }

        // Stop-word file path is read by TokenizerMapper.setup().
        conf.set("stopword.path", otherArgs[2]);

        Job job = Job.getInstance(conf, "title count");
        job.setJarByClass(TitleCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(SumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // SumReducer computes a GLOBAL top-100 from in-memory maps, which is
        // only correct when every key goes to the same reducer.
        job.setNumReduceTasks(1);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        MultipleOutputs.addNamedOutput(job, "positive", TextOutputFormat.class, Text.class, IntWritable.class);
        MultipleOutputs.addNamedOutput(job, "negative", TextOutputFormat.class, Text.class, IntWritable.class);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
