package product;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class toptyps {

	/**Finds the agricultural product types shared by the three provinces
	 * that market the most distinct product types (top three by count).
	 * @param args args[0]=raw input, args[1]=intermediate output, args[2]=final output
	 */
	public static void main(String[] args) throws Exception{
		if (args.length!=3) {
			System.err.println("Usage: toptyps <input> <intermediate> <output>");
			System.exit(-1);
		}
		// Job 1: group product types by province, emit <province, count \t typeList>.
		// Job.getInstance(...) replaces the deprecated Job(Configuration, String) ctor.
		Job job = Job.getInstance(new Configuration(), "TopThree");
		job.setJarByClass(toptyps.class);

		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		job.setMapperClass(SortMap.class);
		job.setReducerClass(SortReduce.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);

		// Job 2 consumes job 1's output, so abort the chain if job 1 fails;
		// the original ignored this return value and ran job 2 unconditionally.
		if (!job.waitForCompletion(true)) {
			System.exit(1);
		}

		// Job 2: rank provinces by distinct-type count, intersect the top three.
		Job job2 = Job.getInstance(new Configuration(), "TopThree");
		job2.setJarByClass(toptyps.class);

		FileInputFormat.addInputPath(job2, new Path(args[1]));
		FileOutputFormat.setOutputPath(job2, new Path(args[2]));

		job2.setMapperClass(CountMap.class);
		job2.setReducerClass(CountReduce.class);
		job2.setMapOutputKeyClass(IntWritable.class);
		job2.setMapOutputValueClass(Text.class);
		job2.setOutputKeyClass(Text.class);
		job2.setOutputValueClass(Text.class);
		// Exit code mirrors job success so schedulers/scripts can detect failure.
		System.exit(job2.waitForCompletion(true) ? 0 : 1);
	}
	//the first group of MR
	
	public static class SortMap extends Mapper<LongWritable, Text,Text,Text>{
		protected void map(LongWritable key, Text value, org.apache.hadoop.mapreduce.Mapper<LongWritable,Text,Text,Text>.Context context) throws java.io.IOException ,InterruptedException {
			String[] lines = value.toString().split("\t");
			if (lines.length==6) {
				String provice = lines[4].trim();
				String name = lines[0].trim();
				context.write(new Text(provice), new Text(name));
			}
		};
	}
	//map output is: <province, productType>
	//after shuffle the reducer sees: <province, {type1, type2, ...}>
	public static class SortReduce extends Reducer<Text,Text,Text,Text>{
		protected void reduce(Text k2, java.lang.Iterable<Text>values, org.apache.hadoop.mapreduce.Reducer<Text,Text,Text,Text>.Context context) throws java.io.IOException ,InterruptedException {
			HashSet<String> set=new HashSet<String>();
			for (Text value : values) {
				set.add(value.toString());//add type into hashset and cleanup;
			}
			//when the foreach end, the set's length is the number of provice market products.
			StringBuffer sBuffer=new StringBuffer();
			for (String sets : set) {
				sBuffer.append(sets+",");
			}
			//when this foreach is end,all product of provice have be append into sBuffer,
			//but we need cut the last ","
			if (sBuffer.length()>0) {
				sBuffer.setLength(sBuffer.length()-1);
				//cut the last ","
			}
			context.write(k2, new Text(set.size()+"\t"+sBuffer.toString()));
		};
	}
	//this reducer emits lines of the form: province \t num \t typeList
	/////////////////////////////////////////////////////////////////////////////////////////////////
	
	//the second group of MR
	public static class CountMap extends Mapper<LongWritable, Text,IntWritable,Text>{
		protected void map(LongWritable key, Text value, org.apache.hadoop.mapreduce.Mapper<LongWritable,Text,IntWritable,Text>.Context context) throws java.io.IOException ,InterruptedException {
			String[] lines = value.toString().split("\t");
			if (lines.length==3) {
				String provice = lines[0];
				int num=Integer.parseInt(lines[1].trim());
				String prods = lines[2].trim();
				context.write(new IntWritable(num), new Text(provice+"\t"+prods));
			}
		};
	}
	//this map emits: <num, "province \t typeList">
	//after shuffle the reducer sees: <num, {"province \t typeList", ...}> sorted by num
	public static class CountReduce extends Reducer<IntWritable, Text,Text,Text>{
		private static TreeMap<Integer, String> tm=new TreeMap<Integer,String>(
				new Comparator<Integer>() {
					@Override
					public int compare(Integer o1, Integer o2) {
						// TODO Auto-generated method stub
						return o2.compareTo(o1);//make tm's data 将序
					}
				}
				);
		protected void reduce(IntWritable k2, java.lang.Iterable<Text> values, org.apache.hadoop.mapreduce.Reducer<IntWritable,Text,Text,Text>.Context context) throws java.io.IOException ,InterruptedException {
			ArrayList<String> list=new ArrayList<String>();
			for (Text text : values) {
				list.add(text.toString());
			}
			tm.put(k2.get(),list.toString());
			//将省份\t列表添加到list中
			if (tm.size()>3) {
				tm.remove(tm.lastKey());
			}
		};
		//when the reduce sucessful ,use cleanup
		//now,tm has the top 3 provice infos
		protected void cleanup(org.apache.hadoop.mapreduce.Reducer<IntWritable,Text,Text,Text>.Context context) throws java.io.IOException ,InterruptedException {
			ArrayList<String> list=new ArrayList<String>();
			String [] prods1=null;
			String [] prods2=null;
			String [] prods3=null;
			Iterator<Integer> iterator=tm.keySet().iterator();
			int count=0;
			while (iterator.hasNext()) {
				count++;
				if (count==1) {
					prods1=tm.get(iterator.next()).toString().split("\t")[1].split(",");
				}else if (count==2) {
					prods2=tm.get(iterator.next()).toString().split("\t")[1].split(",");
				}else {
					prods3=tm.get(iterator.next()).toString().split("\t")[1].split(",");
				}
			}
			for (int i = 0; i < prods3.length; i++) {
				for (int j = 0; j < prods3.length; j++) {
					for (int k = 0; k < prods3.length; k++) {
						if (prods1[i].equals(prods2[j]) && prods2[j].equals(prods3[k])) {
							list.add(prods1[i]);
						}
					}
				}
			}
			context.write(new Text("共同拥有的农产品类型："),new Text(list.toString()));
		};
	}
}
