package org.sg20.searchengine.invertedindex;

import java.io.*;
import java.nio.charset.StandardCharsets;

import org.ansj.domain.Result;
import org.ansj.domain.Term;
import org.ansj.library.DicLibrary;
import org.ansj.splitWord.analysis.DicAnalysis;
import org.ansj.util.MyStaticValue;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.sg20.parsename.ParseNameOutputKey;

public class InvertedIndexMapper extends Mapper<LongWritable, Text, Text, ParseNameOutputKey> {

    @Override
    protected void setup(Mapper<LongWritable, Text, Text, ParseNameOutputKey>.Context context) throws IOException {
        Path nameList = new Path(context.getCacheFiles()[0]);
        FileSystem fs = FileSystem.get(context.getConfiguration());
        InputStreamReader isr = new InputStreamReader(fs.open(nameList));

        BufferedReader in = new BufferedReader(isr);
        BufferedWriter out = new BufferedWriter(new FileWriter("output/name.dic"));
        // read until end of file
        String name;
        while ((name = in.readLine()) != null) {
            // add the name to the trie tree
            out.write(name + "\tuserdefine\t2000000\n");
        }
        in.close();
        out.close();
    }

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, ParseNameOutputKey>.Context context) throws IOException, InterruptedException {
        String filename = ((FileSplit) context.getInputSplit()).getPath().getName();

        Text outkey = new Text();
        ParseNameOutputKey outval = new ParseNameOutputKey(filename, key.get());

        MyStaticValue.ENV.put(DicLibrary.DEFAULT, "output/name.dic");
        Result terms = DicAnalysis.parse(value.toString());
        for (Term term : terms) {
            outkey.set(term.getName());
            context.write(outkey, outval);
        }
    }
}
