/**
 * Tests for the Hadoop SequenceFile API.
 */
package com.tjh.hadoop.file;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;

/**
 * Exercises the Hadoop {@code SequenceFile} read/write API: plain and looped
 * writes to HDFS and the local file system, sequential reads, record
 * positions, {@code seek}/{@code sync} semantics, and compressed output.
 */
public class TestSequenceFile 
{
	/**
	 * Writes four fixed key-value records to a SequenceFile on HDFS.
	 *
	 * <p>Uses try-with-resources so the writer and the {@link FileSystem}
	 * instance are closed even if an {@code append} throws (the original
	 * leaked both on any exception before {@code close()}).
	 */
	@SuppressWarnings("deprecation")
	@Test
    public void write() throws Exception {
    	Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");
		// Resources close in reverse declaration order: writer first, then fs.
		try (FileSystem fs = FileSystem.newInstance(conf);
				Writer writer = SequenceFile.createWriter(fs, conf, name, IntWritable.class, Text.class)) {
			writer.append(new IntWritable(1), new Text("tom1"));
			writer.append(new IntWritable(2), new Text("tom2"));
			writer.append(new IntWritable(3), new Text("tom3"));
			writer.append(new IntWritable(4), new Text("tom4"));
		}
		System.out.println("over!");
    }

	/**
	 * Improved write: loops 100 records, reusing one key and one value
	 * Writable instead of allocating new objects per record.
	 */
	@SuppressWarnings("deprecation")
	@Test
    public void write2() throws Exception {
    	Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");
		try (FileSystem fs = FileSystem.newInstance(conf);
				Writer writer = SequenceFile.createWriter(fs, conf, name, IntWritable.class, Text.class)) {
			IntWritable key = new IntWritable();
			Text value = new Text();
			for (int i = 0; i < 100; i++) {
				key.set(i);
				value.set("tom" + i);
				writer.append(key, value);
			}
		}
		System.out.println("over!");
    }

	/**
	 * Same looped write as {@link #write2()}, but targets the local file
	 * system (Windows path {@code f:/test}) instead of HDFS.
	 */
	@SuppressWarnings("deprecation")
	@Test
    public void write2Local() throws Exception {
    	Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "file:///");
		Path name = new Path("file:///f:/test/myseqfile.seq");
		try (FileSystem fs = FileSystem.newInstance(conf);
				Writer writer = SequenceFile.createWriter(fs, conf, name, IntWritable.class, Text.class)) {
			IntWritable key = new IntWritable();
			Text value = new Text();
			for (int i = 0; i < 100; i++) {
				key.set(i);
				value.set("tom" + i);
				writer.append(key, value);
			}
		}
		System.out.println("over!");
    }

	/**
	 * Reads the SequenceFile sequentially and prints every key-value pair.
	 * {@code Reader.next} deserializes into the reusable key/value references
	 * and returns {@code false} at end of file.
	 */
	@SuppressWarnings("deprecation")
	@Test
	public void readSeq() throws Exception {
		Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");

		IntWritable key = new IntWritable();
		Text value = new Text();
		try (FileSystem fs = FileSystem.newInstance(conf);
				Reader reader = new SequenceFile.Reader(fs, name, conf)) {
			while (reader.next(key, value)) {
				System.out.println(key + " : " + value);
			}
		}
	}

	/**
	 * Queries file-level metadata (compression type/codec, key and value
	 * classes) and prints each record's byte offset within the file.
	 *
	 * <p>Note: method name keeps the original "Postion" spelling for
	 * compatibility. The metadata locals are intentionally unused; they
	 * demonstrate the Reader accessor API.
	 */
	@SuppressWarnings({ "deprecation", "unused" })
	@Test
	public void readSeqPostion() throws Exception {
		Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");

		IntWritable key = new IntWritable();
		Text value = new Text();
		try (FileSystem fs = FileSystem.newInstance(conf);
				Reader reader = new SequenceFile.Reader(fs, name, conf)) {
			// File-level metadata accessors.
			CompressionType type = reader.getCompressionType();
			CompressionCodec codec = reader.getCompressionCodec();
			Class<?> keyClass = reader.getKeyClass();
			Class<?> valueClass = reader.getValueClass();

			// Capture the position BEFORE each next() so the printed offset
			// is where the record starts, not where it ends.
			long pos = reader.getPosition();
			while (reader.next(key, value)) {
				System.out.println(pos + " : " + key + " : " + value);
				pos = reader.getPosition();
			}
		}
	}

	/**
	 * Demonstrates {@code seek}: the target offset must be an exact record
	 * boundary; seeking to a misaligned offset makes the next read throw
	 * (e.g. {@code EOFException}).
	 */
	@SuppressWarnings("deprecation")
	@Test
	public void seekSeqFile() throws Exception {
		Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");

		IntWritable key = new IntWritable();
		Text value = new Text();
		try (FileSystem fs = FileSystem.newInstance(conf);
				Reader reader = new SequenceFile.Reader(fs, name, conf)) {
			// Seek to a valid record boundary.
			reader.seek(178);
			reader.next(key, value);
			System.out.println(key + " : " + value);

			// Seeking to a misaligned offset such as 179 and then calling
			// next() would throw EOFException — seek does not re-align.
		}
	}

	/**
	 * Demonstrates {@code sync}: unlike {@code seek}, it advances to the next
	 * sync marker past the given position, so subsequent reads are always
	 * record-aligned even from an arbitrary offset.
	 */
	@SuppressWarnings("deprecation")
	@Test
	public void syncSeqFile() throws Exception {
		Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");

		IntWritable key = new IntWritable();
		Text value = new Text();
		try (FileSystem fs = FileSystem.newInstance(conf);
				Reader reader = new SequenceFile.Reader(fs, name, conf)) {
			// Seek to the next sync mark past the given position.
			int syncPos = 128;
			reader.sync(syncPos);
			long pos = reader.getPosition();
			reader.next(key, value);
			System.out.println(syncPos + " : " + pos + " : " + key + " : " + value);
		}
	}

	/**
	 * Writes 100 records, inserting a sync marker after every 5th record so
	 * later readers can re-align via {@code Reader.sync}. Creating the file
	 * overwrites any existing file at the same path.
	 */
	@SuppressWarnings("deprecation")
	@Test
    public void writeWithSync() throws Exception {
    	Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile.seq");
		try (FileSystem fs = FileSystem.newInstance(conf);
				Writer writer = SequenceFile.createWriter(fs, conf, name, IntWritable.class, Text.class)) {
			IntWritable key = new IntWritable();
			Text value = new Text();
			for (int i = 1; i <= 100; i++) {
				key.set(i);
				value.set("tom" + i);
				writer.append(key, value);
				if (i % 5 == 0) {
					// Manually insert a sync marker.
					writer.sync();
				}
			}
		}
		System.out.println("over!");
    }

	/**
	 * Writes a block-compressed SequenceFile with the Snappy codec. The
	 * default is {@code CompressionType.RECORD}, which compresses only values
	 * (keys stay uncompressed); BLOCK compresses keys and values in batches.
	 *
	 * <p>The stream-based {@code createWriter(conf, out, ...)} variant does
	 * NOT take ownership of the output stream, so the stream must be closed
	 * explicitly — the original leaked it even on the success path.
	 */
	@SuppressWarnings("deprecation")
	@Test
    public void writeInCompress() throws Exception {
    	Configuration conf = new Configuration();
    	conf.set("fs.defaultFS", "hdfs://s101:8020/");
		Path name = new Path("hdfs://s101:8020/user/myseqfile3.seq");
		// Codecs are obtained via reflection so they are configured from conf.
		SnappyCodec codec = ReflectionUtils.newInstance(SnappyCodec.class, conf);
		try (FileSystem fs = FileSystem.newInstance(conf);
				FSDataOutputStream out = fs.create(name);
				Writer writer = SequenceFile.createWriter(conf, out, IntWritable.class, Text.class,
						CompressionType.BLOCK, codec)) {
			IntWritable key = new IntWritable();
			Text value = new Text();
			for (int i = 1; i <= 100; i++) {
				key.set(i);
				value.set("tom" + i);
				writer.append(key, value);
				if (i % 5 == 0) {
					// Manually insert a sync marker.
					writer.sync();
				}
			}
		}
		System.out.println("over!");
    }
}
