package invertIndex;

import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Scanner;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;





public class InvertIndex {

	//Mapper<keyin ,valuein ,keyout, valueout>
	// Mapper<keyin, valuein, keyout, valueout>
	public static class Map extends Mapper<Object,Text,Text,IntWritable>{
		// Matches any non-word character; such characters are replaced with
		// spaces before tokenizing so punctuation does not stick to words.
		private String pattern = "[^\\w]";
		private final static IntWritable one = new IntWritable(1);
		private URI[] remoteFiles; // distributed-cache resources holding the stop words
		private Set<String> stopWords; // stop-word set built in setup()

		/**
		 * Builds the stop-word set from the job's distributed-cache files.
		 * The stream and scanner are opened with try-with-resources; the
		 * original version leaked both (they were never closed).
		 */
		@Override
		public void setup(Context context) throws IOException{
			Configuration conf = context.getConfiguration();
			remoteFiles = Job.getInstance(conf).getCacheFiles(); // fetch shared cache resources
			stopWords = new TreeSet<String>();

			// Read every cached file and add each whitespace-separated token to the set.
			for (int i = 0; i < remoteFiles.length; i++) {
				// NOTE(review): reads the cache file by its base name, which assumes
				// the framework symlinked it into the task working directory — confirm
				// this matches the cluster configuration.
				try (FileInputStream in = new FileInputStream(
						new Path(remoteFiles[i].getPath()).getName().toString());
						Scanner scan = new Scanner(in)) {
					while (scan.hasNextLine()) {
						String line = scan.nextLine();
						String[] splits = line.trim().split(" ");
						for (int k = 0; k < splits.length; k++) {
							stopWords.add(splits[k]); // build the stop-word set
						}
					}
				}
			}
		}

		/**
		 * Emits one <"word,filename", 1> pair for every non-stop-word token
		 * found in the input line.
		 */
		public void map(Object key, Text value, Context context)
				throws IOException, InterruptedException {

			FileSplit inputSplit = (FileSplit) context.getInputSplit();
			String fileName = inputSplit.getPath().getName(); // source document name

			String line = value.toString().toLowerCase();
			line = line.replaceAll(pattern, " "); // strip punctuation
			StringTokenizer itr = new StringTokenizer(line); // split on whitespace

			// Skip stop words; emit everything else keyed by "word,filename".
			while (itr.hasMoreTokens()) {
				String token = itr.nextToken();
				if (!stopWords.contains(token)) {
					Text word = new Text();
					word.set(token + "," + fileName);
					context.write(word, one); // each occurrence counts as 1
				}
			}
		}
	}
	
	/**
	 * Combiner: locally aggregates the counts emitted by the mapper for each
	 * "word,doc" key to reduce the number of <key,value> pairs shuffled.
	 */
	public static class Combine extends Reducer<Text,IntWritable,Text,IntWritable>{
		public void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			// BUG FIX: the original did sum++ (counting values) instead of summing
			// them. Hadoop may run a combiner zero, one, or several times —
			// including over the output of a previous combine pass, where values
			// can already be greater than 1 — so the combine function must be
			// associative: sum the values, never count them.
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}
			context.write(key, new IntWritable(sum));
		}
	}
	
	public static class NewPartitioner extends HashPartitioner<Text, IntWritable> {  
        public int getPartition(Text key, IntWritable value, int numReduceTasks) {  
        	// 蒙骗 partition word相同的会被入同一个reduce节点
            String term = new String();  
            term = key.toString().split(",")[0]; // <term,docid>=>term  
            return super.getPartition(new Text(term), value, numReduceTasks);  
        }  
    }  
	
	/*
     * reduce的输入形式为：<key: word1，doc1  value: 2>  <key: word1，doc2  value: 1> ...
     */
	//Reducer<keyin, valuein, keyout, valueout>
	
	public static class Reduce extends Reducer<Text,IntWritable,Text,Text> {
		Text currentWord = new Text(" "); // 当前的单词是那一个
		static ArrayList<String> postingList = new ArrayList<String>();  
		int total = 0;
		//reduce 每次调用，一个key ，一组value
		public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
			
            int num=0;  //该单词在所有文档中出现的总的次数
            
            String[] splits = key.toString().split(","); // splits[0] word splits[1] doc
            Text word = new Text(splits[0]);
            
           
            // 如果是新词，将前一个词的信息输出
            if(!currentWord.equals(word) && !currentWord.equals(new Text(" "))) {
            	String out = new String();
            	for(String str:postingList) {
            		out = out+ str;
            	}
            	out  = out + "<" + "total," + String.valueOf(total) + ">." ;
            	
            	// 输出 
            	context.write(currentWord, new Text(out));
            	// 清空
            	total = 0;
            	postingList = new ArrayList();
            }
            
            
            for(IntWritable val:values){
                num += val.get();   
            }
            total += num; 
            
            currentWord = word;
            String post = "<" + splits[1] + "," + String.valueOf(num)  + ">;"; //<文档，频次>；
            postingList.add(post);

		}
		
		//将最后一个单词的key-value输出
        public void cleanup(Context context) throws IOException,InterruptedException {  
            String out = new String();  
             
            for (String str : postingList) {  
                out = out + str;
            }  
            out  = out + "<" + "total," + String.valueOf(total) + ">." ;
            // 输出 
        	context.write(currentWord, new Text(out));
        }  
		
		
	}

	
	/**
	 * Configures and submits the inverted-index job:
	 * Map -> Combine (local counts) -> NewPartitioner (route by term) ->
	 * Reduce (per-word posting lists).
	 *
	 * Usage: InvertIndex <in> <out>
	 */
	public static void main(String[] args) throws Exception{
		// FIX: the original unconditionally overwrote args, making the usage
		// check below dead code. Fall back to the hard-coded cluster paths
		// only when no arguments were supplied.
		if (args.length == 0) {
			args = new String[] {
					"hdfs://10.102.0.198:9000/input",
					"hdfs://10.102.0.198:9000/user/bigdata_201900170088/output"};
		}

		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://10.102.0.198:9000");
		String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
		if (otherArgs.length != 2) {
			System.err.println("Usage: InvertIndex <in> <out>");
			System.exit(2);
		}

		// Delete the output directory if it already exists. Use the parsed
		// otherArgs consistently (the original mixed args[] and otherArgs[]).
		Path outputPath = new Path(otherArgs[1]);
		FileSystem fileSystem = outputPath.getFileSystem(conf);
		if (fileSystem.exists(outputPath)) {
			fileSystem.delete(outputPath, true);
		}

		// Job.getInstance replaces the deprecated new Job(conf, name) constructor.
		Job job = Job.getInstance(conf, "lab2");
		// Share the stop-word file with every task via the distributed cache.
		job.addCacheFile(new Path("hdfs://10.102.0.198:9000/stop_words/stop_words_eng.txt").toUri());

		job.setJarByClass(InvertIndex.class);
		job.setMapperClass(Map.class);
		job.setCombinerClass(Combine.class);
		job.setReducerClass(Reduce.class);
		job.setPartitionerClass(NewPartitioner.class);

		// Map output types differ from the final output types (IntWritable vs Text).
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);

		FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job, outputPath);
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}

}
