package wwj;

import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

 



import wwj.entity.TupleEntity;
import wwj.entity.TupleEntity;
import wwj.reader.CsvReader;
import wwj.reader.TabReader;

/*
 *   hadoop jar /home/hadoop/桌面/boot.jar UsersMatrixParse1 /work/output/UsersMatrixItem/8/part-r-00000 /work/output/UserLikeNum/1/part-r-00000 /work/output/UsersMatrixParse1/4

 */
public class UsersMatrixParse1 {

	/**
	 * Mapper that merges two tab-separated inputs onto a common user key.
	 *
	 * <p>Input lines with exactly 3 fields are treated as user-pair records
	 * (flag 0): the first two fields are user ids, and the record is emitted
	 * once under each user's key. All other lines are treated as per-user
	 * like-count records (flag 1): field 0 is the user id, field 1 the count.
	 * NOTE(review): classification is purely by field count — assumes the
	 * pair file always has 3 columns and the like-count file never does;
	 * verify against the upstream job outputs.
	 */
	public static class MMapper extends
			Mapper<Object, Text, Text, TupleEntity> {

		public void map(Object key, Text value, Context context)
				throws IOException, InterruptedException {
			List<String> list = TabReader
					.readLine(value.toString());
			// Guard: both branches read fields 0 and 1; a malformed/blank
			// line used to throw IndexOutOfBoundsException and fail the job.
			// Best-effort: skip such lines instead.
			if (list.size() < 2) {
				return;
			}
			if (list.size() == 3) {
				// User-pair record: emit under both users so the reducer
				// sees every pair that shares a user.
				TupleEntity entity = new TupleEntity();
				entity.setFlag(0);
				entity.setText1(list.get(0));
				entity.setText2(list.get(1));
				context.write(new Text(list.get(0)), entity);
				context.write(new Text(list.get(1)), entity);
			} else {
				// Like-count record: field 1 is this user's like total.
				TupleEntity entity = new TupleEntity();
				entity.setFlag(1);
				entity.setText1(list.get(0));
				entity.setInt1(Integer.parseInt(list.get(1)));
				context.write(new Text(list.get(0)), entity);
			}
		}
	}

	/**
	 * Reducer that, for each user key, counts how often every other user
	 * co-occurs with it in the buffered pair records, then emits one line
	 * per co-occurring user as "userA\tuserB\tcount" (users ordered
	 * lexicographically so each pair has a canonical form), with this key's
	 * like-count (from the flag-1 record, 0 if absent) as the value.
	 */
	public static class MReducer extends
			Reducer<Text, TupleEntity, Text, Text> {

		public void reduce(Text key, Iterable<TupleEntity> values,
				Context context) throws IOException,
				InterruptedException {
			int likeCount = 0;
			List<TupleEntity> buffered = new ArrayList<TupleEntity>();
			for (TupleEntity val : values) {
				if (val.getFlag().get() == 1) {
					likeCount = val.getInt1().get();
				}
				// Hadoop reuses the same writable instance across iterations,
				// so each value must be deep-copied before buffering.
				buffered.add(TupleEntity.copy(val));
			}

			String keyStr = key.toString();
			Map<String, Integer> pairCounts = new HashMap<>();
			for (TupleEntity val : buffered) {
				if (val.getFlag().get() == 1) {
					continue; // like-count record, not a pair
				}
				String str1 = val.getText1().toString();
				String str2 = val.getText2().toString();
				// Count the "other" user of the pair relative to this key.
				String other = str2.equals(keyStr) ? str1 : str2;
				pairCounts.merge(other, 1, Integer::sum);
			}

			for (Map.Entry<String, Integer> entry : pairCounts.entrySet()) {
				String other = entry.getKey();
				Integer count = entry.getValue();
				// Canonical ordering: smaller user id first.
				if (other.compareTo(keyStr) < 0) {
					context.write(new Text(other + "\t" + keyStr + "\t" + count),
							new Text(likeCount + ""));
				} else {
					context.write(new Text(keyStr + "\t" + other + "\t" + count),
							new Text(likeCount + ""));
				}
			}
		}
	}

	/**
	 * Job driver: args are one or more input paths followed by the output path.
	 * Exits 2 on bad usage, 0 on job success, 1 on job failure.
	 */
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		if (args.length < 2) {
			System.err.println("Usage:  <in> [<in>...] <out>");
			System.exit(2);
		}
		// NOTE(review): new Job(conf, name) is deprecated in Hadoop 2.x in
		// favor of Job.getInstance(conf, name) — left as-is because the
		// target cluster version is unknown.
		Job job = new Job(conf, "UsersMatrixParse1");
		job.setJarByClass(UsersMatrixParse1.class);

		job.setMapperClass(MMapper.class);
		job.setReducerClass(MReducer.class);

		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(TupleEntity.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);

		// All arguments except the last are input paths.
		int last = args.length - 1;
		for (int i = 0; i < last; i++) {
			FileInputFormat.addInputPath(job, new Path(args[i]));
		}
		FileOutputFormat.setOutputPath(job, new Path(args[last]));

		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
