package hdfs;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

/**
 * Demonstrates HDFS write-visibility (coherency) semantics: data written and
 * flushed to an open file is NOT visible to other readers until sync()/close(),
 * after which the flushed length becomes observable via getFileStatus().
 *
 * <p>Requires a running cluster reachable at {@code hdfs://master:9000};
 * these are integration tests, not unit tests.
 */
public class ConsistencyTest extends Assert{

	// Fixed: previously referenced FileSystemFullTest.class (copy-paste bug),
	// which mis-attributed every log line to the wrong class.
	protected static Log log = LogFactory.getLog(ConsistencyTest.class);
	protected FileSystem fs;
	
	public ConsistencyTest() throws IOException{
		
		fs = getFileSystem();
	}

	/**
	 * Creates a FileSystem client bound to the cluster NameNode.
	 *
	 * @return a connected HDFS {@link FileSystem}
	 * @throws IOException if the filesystem cannot be reached
	 */
	public FileSystem getFileSystem() throws IOException{
		
		Configuration conf = new Configuration();
		conf.set("mapred.job.tracker", "master:9001");
		conf.set("fs.default.name", "hdfs://master:9000");
		log.info("配置文件：" + conf);
		 
		FileSystem fs = FileSystem.get(conf);
		log.info("文件系统：" + fs);
		
		return fs;
	}
	
	/** Releases the filesystem handle after each test. */
	@After
	public void testAfter() throws IOException{
		
		fs.close();
	}
	
	/**
	 * flush() alone does NOT make data visible: other readers (including
	 * getFileStatus) still observe a length of 0 for the open file.
	 */
	@Test
	public void testUnvisiable() throws IOException{
		
		Path p = new Path("p");
		OutputStream out = fs.create(p);
		try {
			// Explicit charset: getBytes() without one depends on the platform default.
			out.write("content".getBytes(StandardCharsets.UTF_8));
			out.flush();
			log.info("长度：" + fs.getFileStatus(p).getLen());
			// Fixed argument order: JUnit expects (expected, actual),
			// otherwise failure messages are inverted.
			assertEquals(0, fs.getFileStatus(p).getLen());
		} finally {
			// Close even when the assertion fails, so the lease/stream is not leaked.
			out.close();
		}
	}
	
	/**
	 * sync() forces the flushed bytes to become visible to other readers:
	 * getFileStatus now reports the full written length.
	 */
	@Test
	public void testVisiable() throws IOException{
		
		String content = "content123";
		Path p = new Path("p");
		FSDataOutputStream out = fs.create(p);
		try {
			out.write(content.getBytes(StandardCharsets.UTF_8));
			out.flush();
			// sync() is deprecated in later Hadoop versions in favor of hflush(),
			// kept here for compatibility with this Hadoop release.
			out.sync();
			log.info("长度：" + fs.getFileStatus(p).getLen());
			// Fixed argument order: expected value first. Byte length equals
			// char length here because the content is pure ASCII.
			assertEquals(content.length(), fs.getFileStatus(p).getLen());
		} finally {
			out.close();
		}
	}
}
