package com.nightsoul.hadoop1.test.sort;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile.Reader;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.MapFileOutputFormat;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.lib.HashPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import com.nightsoul.hadoop1.test.OldJobBuilder;
import com.nightsoul.hadoop1.test.junit.NcdcRecordParser;

/**
 * Command-line tool that looks up a single record by its (temperature) key in
 * the MapFile output of a previous sort job, then prints the record's station
 * id and year.
 *
 * Usage: LookupRecordByTemperature &lt;path&gt; &lt;key&gt;
 */
public class LookupRecordByTemperature extends Configured implements Tool {

	/**
	 * Looks up {@code args[1]} (parsed as an int key) in the MapFile directory
	 * {@code args[0]} and prints the matching station id and year.
	 *
	 * @param args {@code [0]} = MapFile output directory, {@code [1]} = integer key
	 * @return 0 on success, -1 on bad usage or when the key is not found
	 * @throws Exception on filesystem or parse errors
	 */
	@Override
	public int run(String[] args) throws Exception {
		if (args.length != 2) {
			OldJobBuilder.printUsage(this, "<path> <key>");
			return -1;
		}
		Path path = new Path(args[0]);
		IntWritable key = new IntWritable(Integer.parseInt(args[1]));
		FileSystem fs = path.getFileSystem(getConf());
		// NOTE: when pointing getReaders at a job's raw output directory, the
		// _SUCCESS file and logs directory are also treated as MapFiles and
		// cause an error, so those must be removed first.
		Reader[] readers = MapFileOutputFormat.getReaders(fs, path, getConf());
		try {
			// The same partitioner the job used, so we probe the right reader.
			Partitioner<IntWritable, Text> partitioner = new HashPartitioner<>();
			Text val = new Text();
			Writable entry = MapFileOutputFormat.getEntry(readers, partitioner, key, val);
			// Check for null BEFORE touching entry: a missing key previously
			// caused an NPE via entry.getClass() before this guard ran.
			if (entry == null) {
				System.err.println("Key not found: " + key);
				return -1;
			}

			NcdcRecordParser parser = new NcdcRecordParser();
			parser.parse(val);
			System.out.printf("%s\t%s\n", parser.getStationId(), parser.getYear());
			return 0;
		} finally {
			// MapFile readers hold open file handles; close them on all paths.
			for (Reader reader : readers) {
				reader.close();
			}
		}
	}

	public static void main(String[] args) throws Exception {
		int exitCode = ToolRunner.run(new LookupRecordByTemperature(), args);
		System.exit(exitCode);
	}
}
