package hdfsexample;

import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;



/**
 * Merges the files of an HDFS input directory (selected by {@code MyPathFilter})
 * into a single HDFS output file, echoing each source file's path, size,
 * permissions and contents to stdout as it goes.
 */
public class MergeFile {
	Path inputPath = null;  // directory containing the files to be merged
	Path outputPath = null; // path of the merged output file
	String hostURI = null;  // HDFS namenode URI, e.g. "hdfs://localhost:9000"

	/**
	 * @param host   HDFS namenode URI (e.g. {@code "hdfs://host:9000"})
	 * @param input  directory whose files will be merged
	 * @param output path of the merged output file
	 */
	public MergeFile(String host, String input, String output) {
		this.inputPath = new Path(input);
		this.outputPath = new Path(output);
		hostURI = host;
	}

	/** Convenience constructor defaulting to a local pseudo-distributed namenode. */
	public MergeFile(String input, String output) {
		this("hdfs://localhost:9000", input, output);
	}

	/** Overrides the HDFS namenode URI used by {@link #doMerge()}. */
	public void setHostURI(String uri) {
		this.hostURI = uri;
	}

	/**
	 * Performs the merge: lists the filtered entries of {@code inputPath},
	 * copies each one's bytes into a freshly created {@code outputPath},
	 * and mirrors the bytes to stdout.
	 *
	 * @throws IOException if any HDFS list/open/create/read/write fails
	 */
	public void doMerge() throws IOException {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", hostURI);
		conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
		FileSystem fsSource = FileSystem.get(URI.create(inputPath.toString()), conf);
		FileSystem fsDest = FileSystem.get(URI.create(outputPath.toString()), conf);

		// Select directory entries via the project-local filter with pattern
		// ".*(\\.abc)". NOTE(review): whether this KEEPS or EXCLUDES ".abc"
		// files depends on MyPathFilter.accept(), not visible here — the
		// original comments contradicted each other; confirm against that class.
		FileStatus[] sourceStatus = fsSource.listStatus(
				inputPath,
				new MyPathFilter(".*(\\.abc)"));

		// try-with-resources guarantees the output stream is closed even if a
		// read or write fails partway (the original leaked it on error, and
		// also wrapped System.out in a PrintStream that it then closed —
		// which closes System.out itself and kills all later stdout output).
		try (FSDataOutputStream fsdos = fsDest.create(outputPath)) {
			// Copy each selected file's bytes into the merged output,
			// mirroring them to stdout after a metadata header line.
			for (FileStatus status : sourceStatus) {
				System.out.print("路径:" + status.getPath() +
					" 文件大小:" + status.getLen() +
					" 权限:" + status.getPermission() +
					" 内容:");
				try (FSDataInputStream fsdis = fsSource.open(status.getPath())) {
					byte[] data = new byte[1024];
					int read;
					while ((read = fsdis.read(data)) > 0) {
						System.out.write(data, 0, read);
						fsdos.write(data, 0, read);
					}
				}
			}
			// Flush (do NOT close) stdout so the mirrored bytes appear promptly.
			System.out.flush();
		}
	}

}
