package cn.pengpeng.day02.wc;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class WordCountReducer {

	/**
	 * Hand-rolled "reduce" side of a word-count job.
	 *
	 * <p>Scans the mapper output files under {@code /wordcount/tmp/}, keeps only
	 * the partition files assigned to this task (file name suffix
	 * {@code "-<taskId>"}), sums the per-word counts, and writes
	 * {@code word:count} lines to {@code /wordcount/output/part-r-<taskId>}.
	 *
	 * @param args args[0] is the reduce task id used to select this task's
	 *             input partition files and to name its output file
	 * @throws Exception on any HDFS or parse failure (toy job: no recovery)
	 */
	public static void main(String[] args) throws Exception {
		// Which reduce task this process is — decides which files it computes.
		int taskId = Integer.parseInt(args[0]);

		// try-with-resources: the original only closed the FileSystem on the
		// success path and leaked every stream if an exception was thrown.
		try (FileSystem fs = FileSystem.get(new URI("hdfs://bigdata01:9000"), new Configuration(), "root")) {
			Map<String, Integer> counts = aggregateCounts(fs, taskId);
			writeResult(fs, counts, taskId);
		}
	}

	/**
	 * Reads every "-taskId" partition file under /wordcount/tmp/ and sums the
	 * counts per word.
	 */
	private static Map<String, Integer> aggregateCounts(FileSystem fs, int taskId) throws IOException {
		Map<String, Integer> counts = new HashMap<>();
		RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new Path("/wordcount/tmp/"), false);
		while (iter.hasNext()) {
			LocatedFileStatus file = iter.next();

			// Skip partition files that belong to other reduce tasks.
			if (!file.getPath().getName().endsWith("-" + taskId)) {
				continue;
			}

			// try-with-resources closes the reader (and the wrapped HDFS
			// stream) even if readLine() throws; explicit UTF-8 avoids
			// depending on the JVM's platform default charset.
			try (BufferedReader br = new BufferedReader(
					new InputStreamReader(fs.open(file.getPath()), StandardCharsets.UTF_8))) {
				String line;
				while ((line = br.readLine()) != null) {
					if (line.isEmpty()) {
						continue; // tolerate trailing blank lines
					}
					// Expected mapper record format: word \t count
					String[] split = line.split("\t");
					// Sum the emitted count rather than adding 1 per line: the
					// original incremented by 1 and ignored split[1], which is
					// wrong whenever an upstream mapper/combiner emits a count
					// greater than 1. Lines without a count field still count
					// as 1, preserving the old behavior for them.
					int value = split.length > 1 ? Integer.parseInt(split[1]) : 1;
					counts.merge(split[0], value, Integer::sum);
				}
			}
		}
		return counts;
	}

	/** Writes "word:count" lines to /wordcount/output/part-r-&lt;taskId&gt; on HDFS. */
	private static void writeResult(FileSystem fs, Map<String, Integer> counts, int taskId) throws IOException {
		try (FSDataOutputStream out = fs.create(new Path("/wordcount/output/part-r-" + taskId))) {
			for (Entry<String, Integer> entry : counts.entrySet()) {
				out.write((entry.getKey() + ":" + entry.getValue() + "\n").getBytes(StandardCharsets.UTF_8));
			}
		}
	}

}
