package cn.mingyuan.hadoop.fileoperation;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Utility class for writing a file to HDFS.
 *
 * <p>Note: Hadoop 0.20.0 does not support appending to an existing file; the
 * target file can only be overwritten. Reason: creating an
 * {@code FSDataOutputStream} via {@code FileSystem.append(Path)} requires the
 * {@code dfs.support.append} property to be {@code true}, which 0.20.0 does
 * not support by default. hdfs-default.xml describes
 * {@code dfs.support.append} as: "Does HDFS allow appends to files? This is
 * currently set to false because there are bugs in the 'append code' and is
 * not supported in any production cluster."
 *
 * <p>This class is not thread-safe.
 *
 * @author mingyuan
 */
public class FileWriter {
	private final Configuration conf;
	private final Path filePath;
	private FSDataOutputStream fsos;
	private FileSystem fs;

	/**
	 * Creates a writer for the given file, overwriting any existing content.
	 *
	 * @param fileName
	 *            path of the file to write (e.g. an {@code hdfs://} URI)
	 * @throws RuntimeException
	 *             if the file system cannot be reached or the file cannot be
	 *             created
	 */
	public FileWriter(String fileName) {
		this.conf = new Configuration();
		this.filePath = new Path(fileName);
		try {
			fs = FileSystem.get(filePath.toUri(), conf);
			// create(path, true) overwrites; real append is unsupported in 0.20.0.
			fsos = fs.create(filePath, true);
		} catch (IOException e) {
			// Fail fast: swallowing the exception here would leave fsos null and
			// cause an NPE later in append()/close().
			throw new RuntimeException("Failed to open " + fileName + " for writing", e);
		}
	}

	/**
	 * Writes the given bytes to the open output stream.
	 *
	 * @param content
	 *            the bytes to write
	 * @throws RuntimeException
	 *             if the write fails
	 */
	public void append(byte[] content) {
		try {
			fsos.write(content);
		} catch (IOException e) {
			// Propagate instead of swallowing, so callers can detect data loss.
			throw new RuntimeException("Failed to write to " + filePath, e);
		}
	}

	/**
	 * Flushes and closes the output stream. Must be called once writing is
	 * finished.
	 *
	 * @throws RuntimeException
	 *             if closing the stream fails
	 */
	public void close() {
		try {
			fsos.close();
		} catch (IOException e) {
			throw new RuntimeException("Failed to close " + filePath, e);
		}
	}

	/**
	 * Small demo: writes a test string to an HDFS file.
	 *
	 * @param args
	 *            unused
	 */
	public static void main(String[] args) {
		String path = "hdfs://localhost:9000/data/newfile.txt";
		FileWriter appender = new FileWriter(path);
		appender.append("abcdefghijklmn\nthis is a test string\nending mechine".getBytes());
		appender.close();
	}

}
