package com.example.stock;

import com.opencsv.CSVParser;
import com.opencsv.CSVParserBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.*;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.regex.Pattern;

public class StockTopN {

    /**
     * Tokenizes the title column of each CSV row and emits (word, 1) for every
     * alphabetic, non-stopword token — but only for rows whose sentiment label
     * (last column) matches the configured {@code label.filter}.
     */
    public static class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        private static final IntWritable ONE = new IntWritable(1);
        // Collapses runs of anything that is not a lowercase ASCII letter;
        // applied AFTER lowercasing, so only [a-z]+ tokens survive.
        private static final Pattern NON_ALPHA = Pattern.compile("[^a-z]+");
        private final Text outWord = new Text();
        private final Set<String> stop = new HashSet<>();
        private int labelFilter = 1;
        private CSVParser parser;

        /**
         * Reads job config and loads the distributed-cache stopword list,
         * which the driver links into the task working dir as "stopwords.txt".
         */
        @Override
        protected void setup(Context ctx) throws IOException {
            Configuration conf = ctx.getConfiguration();
            labelFilter = conf.getInt("label.filter", 1);
            parser = new CSVParserBuilder().withSeparator(',')
                    .withQuoteChar('"').withEscapeChar('\\').build();

            URI[] caches = ctx.getCacheFiles();
            if (caches != null) {
                for (URI u : caches) {
                    if ("stopwords.txt".equals(u.getFragment())) {
                        // FIX: read explicitly as UTF-8. The previous bare
                        // FileReader used the platform default charset and could
                        // corrupt non-ASCII stopwords on pre-18 JVMs.
                        try (BufferedReader br = new BufferedReader(new InputStreamReader(
                                new FileInputStream("stopwords.txt"), StandardCharsets.UTF_8))) {
                            String line;
                            while ((line = br.readLine()) != null) {
                                // FIX: Locale.ROOT makes lowercasing locale-independent
                                // (e.g. Turkish dotless-i would otherwise break matching).
                                line = line.trim().toLowerCase(Locale.ROOT);
                                if (!line.isEmpty()) stop.add(line);
                            }
                        }
                        break; // only one stopword file is expected
                    }
                }
            }
        }

        /**
         * Parses one CSV line; skips blanks, malformed rows, the header row,
         * and rows whose label does not match the filter; emits (word, 1) pairs.
         */
        @Override
        protected void map(LongWritable key, Text value, Context ctx) throws IOException, InterruptedException {
            String line = value.toString();
            if (line.trim().isEmpty()) return;

            String[] fields;
            try {
                fields = parser.parseLine(line);
            } catch (Exception e) {
                return; // malformed CSV row: skip rather than fail the task
            }
            // FIX: parseLine may return null for some inputs; guard before indexing.
            if (fields == null || fields.length < 2) return;

            String title = fields[0];
            String labelStr = fields[fields.length - 1].trim();

            // Skip the CSV header row.
            if ("text".equalsIgnoreCase(title) || "sentiment".equalsIgnoreCase(labelStr)) return;

            int label;
            try {
                label = Integer.parseInt(labelStr);
            } catch (NumberFormatException e) {
                return; // non-numeric label: skip
            }
            if (label != labelFilter) return;

            // FIX: locale-independent lowercasing, consistent with the stopword load.
            String cleaned = NON_ALPHA.matcher(title.toLowerCase(Locale.ROOT)).replaceAll(" ").trim();
            if (cleaned.isEmpty()) return;

            for (String w : cleaned.split("\\s+")) {
                if (w.isEmpty() || stop.contains(w)) continue;
                outWord.set(w);
                ctx.write(outWord, ONE);
            }
        }
    }

    public static class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        protected int sumValues(Iterable<IntWritable> vals) {
            int s = 0;
            for (IntWritable v : vals) s += v.get();
            return s;
        }
    }

    /**
     * Emits the N most frequent words in descending count order. Must run as
     * the job's single reducer (the driver sets setNumReduceTasks(1)) for the
     * result to be a global top-N.
     */
    public static class TopNReducer extends SumReducer {
        /** Immutable (word, count) holder for the heap. */
        private static class Pair {
            final String word;
            final int cnt;
            Pair(String w, int c) { word = w; cnt = c; }
        }

        private static final int N = 100;
        // Min-heap keyed on count: the root is the weakest of the current top N.
        private PriorityQueue<Pair> minHeap;

        @Override
        protected void setup(Context ctx) {
            minHeap = new PriorityQueue<>(Comparator.comparingInt(p -> p.cnt));
        }

        /**
         * FIX: maintain the bounded heap incrementally instead of buffering the
         * ENTIRE vocabulary in a HashMap until cleanup() as before — memory is
         * now O(N) rather than O(distinct words), which was the whole point of
         * using a min-heap. Output is unchanged.
         */
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context ctx) {
            int c = sumValues(values);
            if (minHeap.size() < N) {
                minHeap.offer(new Pair(key.toString(), c));
            } else if (c > Objects.requireNonNull(minHeap.peek()).cnt) {
                minHeap.poll();
                minHeap.offer(new Pair(key.toString(), c));
            }
        }

        /** Drains the heap and writes the survivors sorted by descending count. */
        @Override
        protected void cleanup(Context ctx) throws IOException, InterruptedException {
            List<Pair> top = new ArrayList<>(minHeap);
            top.sort((a, b) -> Integer.compare(b.cnt, a.cnt));
            for (Pair p : top) ctx.write(new Text(p.word), new IntWritable(p.cnt));
        }
    }

    /**
     * Driver. Usage: StockTopN &lt;input_csv&gt; &lt;stopwords_hdfs&gt; &lt;output_dir&gt; &lt;label(1|-1)&gt;
     * Exits 0 on job success, 1 on failure or bad arguments.
     */
    public static void main(String[] args) throws Exception {
        if (args.length != 4) {
            System.err.println("Usage: StockTopN <input_csv> <stopwords_hdfs> <output_dir> <label(1|-1)>");
            System.exit(1);
        }

        // FIX: fail fast with a readable usage error on a non-numeric label
        // instead of surfacing a raw NumberFormatException stack trace.
        int label = 0;
        try {
            label = Integer.parseInt(args[3]);
        } catch (NumberFormatException e) {
            System.err.println("label must be an integer (expected 1 or -1), got: " + args[3]);
            System.exit(1);
        }

        Configuration conf = new Configuration();
        conf.setInt("label.filter", label);

        Job job = Job.getInstance(conf, "Stock Title TopN (" + args[3] + ")");
        job.setJarByClass(StockTopN.class);

        // Ship the stopword list via the distributed cache; the URI fragment
        // makes it appear as "stopwords.txt" in each task's working directory.
        job.addCacheFile(new URI(args[1] + "#stopwords.txt"));

        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(SumReducer.class);
        job.setReducerClass(TopNReducer.class);
        // A single reducer is required for a globally correct top-N.
        job.setNumReduceTasks(1);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[2]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
