package com.lvmama.java.rhino.spark.utils;

import java.io.IOException;
import java.io.Serializable;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;

public class HdfsUtils implements Serializable {
	private static final long serialVersionUID = 4262287022861189790L;
	private static final Logger LOGGER = Logger.getLogger(HdfsUtils.class);
	/** Root HDFS directory; per-table/per-day subdirectories are created beneath it. */
	private static final String BASE_DIR = Constants.getInstance().getProperty("hadoop.hdfs.base.dir");
	transient private final Configuration conf = new Configuration();
	// volatile is required for the double-checked locking in the constructor to be safe.
	transient private static volatile FileSystem fs = null;
	/** Map&lt;table name, open HDFS output stream&gt;; concurrent because workers share these pools. */
	private static final Map<String, FSDataOutputStream> fsDataOutputStreamPool = new ConcurrentHashMap<String, FSDataOutputStream>();
	/** Map&lt;table name, current HDFS output file name&gt; (timestamp-free, used for day-rollover detection). */
	private static final Map<String, String> fsFileNamePool = new ConcurrentHashMap<String, String>();
	// volatile is required for the double-checked locking in getInstance() to be safe.
	private static volatile HdfsUtils instance;

	/**
	 * Lazily initializes the shared {@link FileSystem} handle on first construction.
	 * Append support is enabled via {@code dfs.support.append}.
	 */
	private HdfsUtils() {
		if (fs == null) {
			synchronized (HdfsUtils.class) {
				if (fs == null) {
					conf.set("fs.defaultFS", Constants.getInstance().getProperty("hadoop.hdfs.url"));
					conf.set("dfs.support.append", "true");
					try {
						fs = FileSystem.get(conf);
					} catch (IOException e) {
						// Log with cause (no printStackTrace); fs stays null and later
						// calls will fail fast with an NPE.
						LOGGER.error(e.getMessage(), e);
					}
				}
			}
		}
	}

	/**
	 * @return the process-wide singleton instance (double-checked locking).
	 */
	public static HdfsUtils getInstance() {
		if (instance == null) {
			synchronized (HdfsUtils.class) {
				if (instance == null) {
					instance = new HdfsUtils();
				}
			}
		}
		return instance;
	}

	/**
	 * Checks whether the given path exists on HDFS.
	 *
	 * @param dir path to test
	 * @return true if the path exists; false if it does not or the check failed
	 */
	public boolean exists(String dir) {
		boolean flag = false;
		try {
			flag = fs.exists(new Path(dir));
		} catch (IOException e) {
			// Log with cause instead of printStackTrace(); failure reads as "absent".
			LOGGER.error(e.getMessage(), e);
		}
		return flag;
	}

	/**
	 * Returns the pooled output stream for the given table, creating the
	 * day-partitioned directory/file on first use and rolling the stream over
	 * when the date (and therefore the target file name) changes.
	 *
	 * @param tableName source table name
	 * @return an open stream for the current day's file
	 * @throws IOException if any HDFS operation fails
	 */
	public FSDataOutputStream getFsDataOutputStream(String tableName) throws IOException {
		String dir = getHdfsDirName(tableName);
		String fileName = dir + "/" + getHdfsFileName(tableName);
		Path path = new Path(dir);
		FSDataOutputStream outputStream = fsDataOutputStreamPool.get(tableName);
		// Directory exists: either it pre-dates this Spark run, or another worker created it.
		if (fs.exists(path)) {
			if (outputStream == null) {
				// Spark just started: open a stream and remember the file name.
				outputStream = initOutputStream(tableName, fileName);
			} else if (!fileName.equals(fsFileNamePool.get(tableName))) {
				// The computed file name changed, i.e. the job crossed into a new day:
				// close the old stream and open one for the new day's file.
				outputStream.close();
				outputStream = initOutputStream(tableName, fileName);
			}
		} else {
			// Directory absent: first write of the day. If a stream from a previous
			// day is still open, close it before starting the new one.
			if (outputStream != null) {
				outputStream.close();
			}
			// A false return is not fatal: another worker may have created the
			// directory concurrently.
			if (fs.mkdirs(path)) {
				LOGGER.info("创建目录成功：" + dir);
			} else {
				LOGGER.info("创建目录失败：" + dir);
			}
			outputStream = initOutputStream(tableName, fileName);
		}
		return outputStream;
	}

	/**
	 * Creates a new HDFS file for the table and caches both the stream and the
	 * logical file name.
	 *
	 * @param tableName table the stream belongs to
	 * @param fileName  logical (timestamp-free) file name used for rollover detection
	 * @return the newly opened stream
	 * @throws IOException if the file cannot be created
	 */
	private FSDataOutputStream initOutputStream(String tableName, String fileName) throws IOException {
		// A millisecond timestamp is appended so create(overwrite=false) never
		// collides with a file left over from an earlier run the same day.
		Path filePath = new Path(fileName + System.currentTimeMillis());
		FSDataOutputStream outputStream = fs.create(filePath, false, 512);
		fsDataOutputStreamPool.put(tableName, outputStream);
		// Store the timestamp-free name: getFsDataOutputStream() compares it
		// against the freshly computed name to detect a day rollover.
		fsFileNamePool.put(tableName, fileName);
		LOGGER.info("创建文件成功：" + filePath.getName());
		return outputStream;
	}

	/**
	 * Builds the day-partitioned directory name: BASE_DIR/table/yyyy/M/d.
	 */
	private String getHdfsDirName(String tableName) {
		Calendar cal = Calendar.getInstance();
		// StringBuilder: the buffer is method-local, so no synchronization needed.
		StringBuilder sb = new StringBuilder(BASE_DIR);
		sb.append("/").append(tableName);
		sb.append("/").append(cal.get(Calendar.YEAR));
		// Calendar.MONTH is zero-based, hence the +1.
		sb.append("/").append(cal.get(Calendar.MONTH) + 1);
		sb.append("/").append(cal.get(Calendar.DATE));
		return sb.toString();
	}

	/**
	 * Normalizes the table name (spaces removed, lower-cased) into a data file name.
	 */
	private String getHdfsFileName(String tableName) {
		return tableName.replace(" ", "").toLowerCase() + ".data";
	}

	/**
	 * Closes every pooled stream and resets both pools.
	 * <p>
	 * Fix: the original cleared only {@code fsFileNamePool}, leaving closed
	 * streams in {@code fsDataOutputStreamPool}, so a later
	 * {@code getFsDataOutputStream} could hand out an already-closed stream.
	 *
	 * @throws IOException if closing a stream fails
	 */
	public void close() throws IOException {
		try {
			for (FSDataOutputStream outputStream : fsDataOutputStreamPool.values()) {
				if (outputStream != null) {
					outputStream.close();
				}
			}
		} finally {
			// Clear BOTH pools so stale (closed) streams are never reused.
			fsDataOutputStreamPool.clear();
			fsFileNamePool.clear();
		}
	}
}
