// NOTE(review): this entire class is commented out (dead code). Either restore it or delete the file — version control preserves the history.
//package hadoop_3;
//
//import java.io.BufferedInputStream;
//import java.io.FileInputStream;
//import java.io.IOException;
//import java.io.InputStream;
//import java.net.URI;
//import org.apache.hadoop.conf.Configuration;
//import org.apache.hadoop.fs.FSDataOutputStream;
//import org.apache.hadoop.fs.FileSystem;
//import org.apache.hadoop.fs.Path;
//import org.apache.hadoop.io.IOUtils;
//
//
//public class Test1 {
//
//
//	public static void main(String[] args) throws IOException {
//		createFileCopy();
//	}
//
//	public static  void  createFileCopy() throws IOException{
//		String src="/home/hadoop/word.txt";
//		String dst="hdfs://master:9000/hdfs/word.txt";
//		Configuration conf=new Configuration();//1.configuration
//		FileSystem  fs=FileSystem.get( URI.create(dst),  conf);//2.FileSystem  fs
//
//	  InputStream  in=new BufferedInputStream(new FileInputStream(src));//3. input stream (reads the local word.txt)
//		FSDataOutputStream out=fs.create(new Path(dst));//  output stream: fs.create() creates/overwrites hdfs/word.txt on HDFS
//		IOUtils.copyBytes(in, out, 4096,false);//4. copy; the 'false' flag means copyBytes does NOT close the streams for us
//		IOUtils.closeStream(in);//5. close both streams
//		IOUtils.closeStream(out);// must close out too, otherwise the HDFS write may never be flushed/finalized
//	}
//
//
//	public static  void  appendFile() throws IOException{
//		String src="/home/hadoop/word.txt";
//		String dst="hdfs://master:9000/hdfs/word.txt";
//		Configuration conf=new Configuration();//1.configuration
//		conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");// fixed key: the HDFS client property needs the ".policy" suffix
//		conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
//
//		FileSystem  fs=FileSystem.get( URI.create(dst),  conf);//2.FileSystem  fs
//
//	  InputStream  in=new BufferedInputStream(new FileInputStream(src));//3. input stream (reads the local word.txt)
//		FSDataOutputStream out=fs.append(new Path(dst));// fs.append() appends to the existing hdfs/word.txt — fs.create() here would have overwritten it
//		IOUtils.copyBytes(in, out, 4096,false);//4. copy; the 'false' flag means copyBytes does NOT close the streams for us
//		IOUtils.closeStream(in);//5. close both streams
//		IOUtils.closeStream(out);// must close out too, otherwise the appended data may never be flushed to HDFS
//	}
//
//
//
//
//}
