package com.hdfs.file;

import java.io.FileInputStream;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

public class TestUpload {

	/**
	 * Builds an HDFS client pointed at the test cluster's NameNode.
	 * Shared by both tests to avoid duplicating the configuration.
	 */
	private static FileSystem newFileSystem() throws Exception {
		// Configure the NameNode (HDFS master node) address.
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://192.168.157.111:9000");
		// NOTE: FileSystem.get may return a cached, shared instance, so we
		// deliberately do not close it here (matches original behavior).
		return FileSystem.get(conf);
	}

	/**
	 * Uploads a local file to HDFS with a manual buffered copy loop.
	 */
	@Test
	public void testUpload1() throws Exception{
		// Obtain the HDFS client.
		FileSystem fs = newFileSystem();

		// try-with-resources guarantees both streams are closed even when
		// the copy loop throws (the original only closed them on success).
		try (InputStream in = new FileInputStream("d:\\temp\\hadoop-2.7.3.tar.gz");
		     OutputStream out = fs.create(new Path("/tools/a.tar.gz"))) {
			// Copy in 1 KiB chunks until end of stream.
			byte[] buffer = new byte[1024];
			int len;
			while ((len = in.read(buffer)) > 0) {
				// Write exactly the bytes read this iteration.
				out.write(buffer, 0, len);
			}
			out.flush();
		}
	}


	/**
	 * Uploads a local file to HDFS using Hadoop's IOUtils helper.
	 */
	@Test
	public void testUpload2() throws Exception{
		// Obtain the HDFS client.
		FileSystem fs = newFileSystem();

		// The 3-arg IOUtils.copyBytes overload does NOT close its streams;
		// the original leaked both. try-with-resources closes them reliably.
		try (InputStream in = new FileInputStream("d:\\temp\\hadoop-2.7.3.tar.gz");
		     OutputStream out = fs.create(new Path("/tools/b.tar.gz"))) {
			// Use the utility class to simplify the copy (1 KiB buffer).
			IOUtils.copyBytes(in, out, 1024);
		}
	}
}

