package test.spark;

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;

import org.apache.hadoop.conf.Configuration;
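import org.apache.hadoop.fs.FSDataInputStream;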
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
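import org.apache.hadoop.io.IOUtils;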

/**
 * Writes a stream of test lines to a file on HDFS, flushing after each write,
 * to exercise the FileSystem create/write/flush path.
 */
public class App {
	public static void main(String[] args) {
		Configuration conf = new Configuration();
		try {
			FileSystem fs = FileSystem.get(conf); // resolves fs.defaultFS from the loaded configuration
			String pathName = "/user/zhanghl/hiveview_mr_job2.log";
			// create() with overwrite=true replaces the file if it already exists
			FSDataOutputStream out = fs.create(new Path(pathName), true);
			BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out));
			int index = 0;
			// emit 1000 test chunks, pacing the writes so the file grows gradually
			while (index < 1000) {
				index++;
				bw.write("adsf dasfa fdas f dasdf dasf das dfas fd s fdas fdas ");
				bw.flush(); // push buffered characters down to the HDFS output stream
				System.out.println("wrote to file");
				Thread.sleep(20); // pause 20 ms between writes
			}
			bw.close(); // flushes remaining bytes and closes the underlying HDFS stream
			fs.close();
		} catch (IOException e) {
			e.printStackTrace();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // restore the interrupt flag
			e.printStackTrace();
		}
		
	}
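
	/**
	 * A minimal read-back sketch: opens the same HDFS path and copies its
	 * contents to stdout, a simple way to check what main() wrote. The method
	 * name and the 4096-byte buffer size are illustrative choices, not part of
	 * the original test.
	 */
	static void readBack(FileSystem fs, String pathName) throws IOException {
		FSDataInputStream in = fs.open(new Path(pathName));
		try {
			// copyBytes streams the file to stdout; 'false' leaves System.out open
			IOUtils.copyBytes(in, System.out, 4096, false);
		} finally {
			in.close();
		}
	}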
}
