package chen.bupt.mapreduce.tfidf;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import chen.bupt.constant.Constants;
import chen.bupt.text.TextClear;
import chen.bupt.text.TextUtils;
import chen.bupt.util.HDFSFileUtils;

public class TFIDFJob extends Configured implements Tool {
	private static String inputPath = Constants.SEG_PATH;
	private static String outputPath = Constants.TFIDF_PATH;
	private static String TMP1 = Constants.TMP1;

	/**
	 * input: doc;term1 term2 term3<br>
	 * output: term doc;termsum;doctermsum<br>
	 * 
	 * @author forrest
	 * 
	 */
	public static class Map1 extends Mapper<LongWritable, Text, Text, Text> {

		public void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			if (!HDFSFileUtils.isInputFile(context))
				return;
			String keyCont[] = TextUtils.splitTextToCont(value.toString(), ";",
					7);
			if (keyCont == null || keyCont.length != 2 || keyCont[1] == null) {
				return;
			}
			String content[] = keyCont[1].split(" ");
			int termSum = 0;
			Map<String, Integer> map = new HashMap<String, Integer>();
			for (String term : content) {
				if (!"".equals(term)) {
					termSum++;
					if (map.containsKey(term)) {
						map.put(term, map.get(term) + 1);
					} else {
						map.put(term, 1);
					}
				}
			}
			for (String term : map.keySet()) {
				context.write(new Text(term), new Text(keyCont[0] + ";"
						+ map.get(term) + ";" + termSum));
			}
		}

	}

	/**
	 * input: term doc;termsum;doctermsum<br>
	 * output: doc term;tfidf
	 * 
	 * @author forrest
	 * 
	 */
	public static class Reduce1 extends Reducer<Text, Text, Text, Text> {
		protected void reduce(Text key, Iterable<Text> values, Context context)
				throws IOException, InterruptedException {
			// document frequency
			int DF = 0;
			int recordNum = Constants.RECORD_NUM;
			Map<String, String> map = new HashMap<String, String>();
			for (Text value : values) {
				DF++;
				String s[] = TextUtils
						.splitTextToCont(value.toString(), ";", 7);
				map.put(s[0], s[1]);
			}
			// 如果是名字且少于5次出现，判定不是名人
			String term = key.toString();
			if (term.contains("/nr")) {
				if (DF < 5) {
					return;
				} else {
					term = TextClear.substring(term, "/nr");
				}
			}
			for (String doc : map.keySet()) {
				String stat[] = map.get(doc).split(";");
				double tf = (double) Integer.parseInt(stat[0])
						/ (double) Integer.parseInt(stat[1]);
				double idf = (double) Math.log10((double) recordNum
						/ (double) DF);
				double tfidf = tf * idf;
				context.write(new Text(doc), new Text(term + ";" + tfidf));
			}
		}
	}

	/**
	 * input: doc term;tfidf <br>
	 * output: doc term;tfidf
	 * 
	 */
	public static class Map2 extends Mapper<LongWritable, Text, Text, Text> {

		public void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			if (!HDFSFileUtils.isInputFile(context))
				return;
			String[] wordFileScore = value.toString().split("\t");
			context.write(new Text(wordFileScore[0]),
					new Text(wordFileScore[1]));
		}
	}

	/**
	 * input: doc term;tfidf <br>
	 * 
	 * @author forrest
	 * 
	 */
	public static class Reduce2 extends Reducer<Text, Text, Text, NullWritable> {

		protected void reduce(Text key, Iterable<Text> values, Context context)
				throws IOException, InterruptedException {

			StringBuilder sb = new StringBuilder();
			for (Text value : values) {
				sb.append("|" + value.toString());
			}

			context.write(new Text(key.toString() + sb.toString()),
					NullWritable.get());
		}
	}

	/**
	 * Chains the two MapReduce jobs: job1 computes per-(term, document)
	 * statistics and writes per-document TF-IDF scores into a temporary
	 * directory; job2 groups the scores back into one output line per
	 * document and cleans up the temporary directory.
	 * 
	 * @param args unused command-line arguments
	 * @return 0 if both jobs succeed, 1 if either job fails
	 * @throws Exception if job submission or HDFS access fails
	 */
	public int run(String[] args) throws Exception {
		Configuration conf1 = getConf();
		Path input = new Path(inputPath);
		Path tmp1 = new Path(TMP1);
		Path output = new Path(outputPath);
		// Remove stale output from previous runs; Hadoop refuses to write
		// into an existing output directory.
		HDFSFileUtils.deleteFile(tmp1, conf1);
		HDFSFileUtils.deleteFile(output, conf1);

		conf1.set("mapred.child.tmp", "/tmp/child");
		// Disable the task timeout: some records are slow to process.
		conf1.set("mapred.task.timeout", "0");
		Job job1 = new Job(conf1, "tfidf1");
		job1.setJarByClass(TFIDFJob.class);
		job1.setMapperClass(Map1.class);
		job1.setReducerClass(Reduce1.class);
		job1.setMapOutputKeyClass(Text.class);
		job1.setMapOutputValueClass(Text.class);
		job1.setOutputKeyClass(Text.class);
		job1.setOutputValueClass(Text.class);
		FileInputFormat.addInputPath(job1, input);
		FileOutputFormat.setOutputPath(job1, tmp1);
		// Abort early if job1 fails: job2's input would be missing/partial.
		if (!job1.waitForCompletion(true)) {
			return 1;
		}

		Configuration conf2 = getConf();
		conf2.set("mapred.child.tmp", "/tmp/child");
		conf2.set("mapred.task.timeout", "0");
		Job job2 = new Job(conf2, "tfidf12");
		job2.setMapOutputKeyClass(Text.class);
		job2.setMapOutputValueClass(Text.class);
		job2.setJarByClass(TFIDFJob.class);
		job2.setMapperClass(Map2.class);
		job2.setReducerClass(Reduce2.class);
		job2.setOutputKeyClass(Text.class);
		job2.setOutputValueClass(NullWritable.class);
		FileInputFormat.addInputPath(job2, tmp1);
		FileOutputFormat.setOutputPath(job2, output);
		// Clean up the intermediate directory whether or not job2 succeeds,
		// but propagate job2's status to the caller.
		boolean ok = job2.waitForCompletion(true);
		HDFSFileUtils.deleteFile(tmp1, conf2);
		return ok ? 0 : 1;
	}

	/**
	 * Entry point; runs the TF-IDF job chain via {@link ToolRunner}.
	 * 
	 * @param args command-line arguments forwarded to {@link #run(String[])}
	 * @throws Exception if the job chain fails to launch
	 */
	public static void main(String[] args) throws Exception {
		// Propagate the job's exit status to the shell so scripts and
		// schedulers can detect failure (previously always exited 0).
		int exitCode = ToolRunner.run(new Configuration(), new TFIDFJob(), args);
		System.exit(exitCode);
	}
}