package cn.pengpeng.day02.wc;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Hand-rolled "map" task of a word-count job over HDFS.
 *
 * <p>Reads the byte range {@code [startOffset, startOffset + length)} of one HDFS
 * file, splits each line into words on single spaces, and writes {@code word\t1}
 * records into two temporary partition files under {@code /wordcount/tmp/},
 * partitioned by the parity of the word's hash code (so a later "reduce" step can
 * process each partition independently).
 *
 * <p>Split-boundary convention (mirrors Hadoop's TextInputFormat): every task
 * except task 0 discards its first (possibly partial) line, and every task keeps
 * reading one line PAST its end offset — the skipped line is completed by the
 * previous task, so each line is processed exactly once.
 *
 * <p>Args: {@code args[0]} task id, {@code args[1]} HDFS file path,
 * {@code args[2]} start byte offset, {@code args[3]} split length in bytes.
 */
public class WordCountMaper {
	public static void main(String[] args) throws Exception {
		int taskId = Integer.parseInt(args[0]);
		String file = args[1];
		long startOffset = Long.parseLong(args[2]);
		long length = Long.parseLong(args[3]);

		// try-with-resources guarantees every stream (and the FileSystem handle)
		// is closed even when a read/write throws mid-split.
		try (FileSystem fs = FileSystem.get(new URI("hdfs://bigdata01:9000"), new Configuration(), "root")) {
			try (FSDataInputStream in = fs.open(new Path(file));
					FSDataOutputStream tmpOut_0 = fs.create(new Path("/wordcount/tmp/part-m" + taskId + "-0"));
					FSDataOutputStream tmpOut_1 = fs.create(new Path("/wordcount/tmp/part-m" + taskId + "-1"))) {

				// Position at the start of this task's split before any bytes are buffered.
				in.seek(startOffset);

				// Decode explicitly as UTF-8: the platform default charset would make
				// output (and byte accounting below) depend on the JVM's locale.
				try (BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {

					// Every task but the first skips its (possibly partial) first line;
					// the previous task reads one line past its boundary to cover it.
					if (taskId != 0) {
						br.readLine();
					}

					String line = null;
					long count = 0;
					while ((line = br.readLine()) != null) {
						String[] words = line.split(" ");
						for (String word : words) {
							byte[] record = (word + "\t" + 1 + "\n").getBytes(StandardCharsets.UTF_8);
							// floorMod is always non-negative, making the even/odd
							// partition explicit (plain % yields -1 for negative odd hashes,
							// which routed to the same file but less obviously).
							if (Math.floorMod(word.hashCode(), 2) == 0) {
								tmpOut_0.write(record);
							} else {
								tmpOut_1.write(record);
							}
						}

						// Track consumed BYTES, not chars: offsets/length are byte-based,
						// and UTF-8 lines may be longer in bytes than in chars.
						// NOTE(review): assumes '\n' line endings (+1 per line) — a "\r\n"
						// file would drift by one byte per line; confirm input format.
						count += line.getBytes(StandardCharsets.UTF_8).length + 1;

						// Deliberately overshoot: always finish one line past the boundary
						// so the next task's skipped partial line is fully covered here.
						if (count > length) {
							break;
						}
					}
				}
			}
		}
	}

}
