package com.ailk.bigdata.etl.realstream.server.service.impl;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.text.DecimalFormat;
import java.util.Arrays;
import java.util.Calendar;
import java.util.List;
import java.util.PropertyResourceBundle;
import java.util.ResourceBundle;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.ailk.bigdata.etl.common.server.io.DirParseUtil;
import com.ailk.bigdata.etl.common.server.io.FilenameUtil;
import com.ailk.bigdata.etl.common.server.model.hadoop.NameNodeInfo;
import com.ailk.bigdata.etl.common.server.model.hadoop.load.HdfsUserModel;
import com.ailk.bigdata.etl.common.server.thread.ThreadPool;
import com.ailk.bigdata.etl.realstream.server.model.RealStreamUnit;
import com.ailk.bigdata.etl.realstream.server.service.RollingFileService;
import com.ailk.bigdata.etl.realstream.server.tools.RealStreamTool;
import com.ailk.bigdata.hadoop.hdfs.HdfsUtil;
import com.google.common.base.Preconditions;

/**
 * 
 * @description 滚动文件服务实现：创建滚动文件写入线程、定时清理落地目录、启动hdfs上传线程
 * @author [xuwei3]
 * @version [版本号,2015-7-28]
 * @see [相关类/方法]
 * @since [产品/模块版本]
 */
public class RollingFileServiceImpl implements RollingFileService
{

	/**
	 * Builds a rolling-file writer task.
	 *
	 * @param rollInterval roll interval handed to the writer
	 * @param filePath     target directory of the rolling file
	 * @param fileName     base name of the rolling file
	 * @return a new {@link RollingFileWriter} for the given location
	 */
	public RollingFileWriter getRunnable(int rollInterval, String filePath, String fileName)
	{
		RollingFileWriter writer = new RollingFileWriter(rollInterval, filePath, fileName);
		return writer;
	}

	/**
	 * Schedules periodic housekeeping of the unit's landing directory:
	 * a {@link FileManagerTask} runs every 2 hours (first run after 1 hour).
	 *
	 * @param rStreamUnit unit whose create-file directory is managed
	 * @param poolSize    size of the scheduled thread pool
	 */
	public void filePathManager(RealStreamUnit rStreamUnit, int poolSize)
	{
		FileManagerTask task = new FileManagerTask(rStreamUnit.getCreateFileDir());
		ThreadPool.getScheduleThreadPool(RealStreamTool.THREAD_FILEMGR_POOL_NAME, poolSize)
				.scheduleAtFixedRate(task, 1, 2, TimeUnit.HOURS);
	}

	/**
	 * Starts the background task that uploads landed files to HDFS.
	 *
	 * @param rStreamUnit unit whose landing directory is polled for uploads
	 */
	public void uploadHdfs(RealStreamUnit rStreamUnit)
	{
		HdfsUploadTash uploadTask = new HdfsUploadTash(rStreamUnit);
		ThreadPool.getTheadPool(RealStreamTool.THREAD_FILEMGR_POOL_NAME).runTask(uploadTask);
	}
}

/**
 * 
 * @description 管理落地文件夹，每隔一段时间删除一次备份文件
 * @author [xuwei3]
 * @version [版本号,2015-6-18]
 * @see [相关类/方法]
 * @since [产品/模块版本]
 */
class FileManagerTask implements Runnable
{
	private static final Logger logger = LoggerFactory.getLogger(FileManagerTask.class);

	/** Files still being written carry this suffix and must never be removed. */
	private static final String inUseSuffix = ".tmp";

	/** Age threshold in milliseconds after which a backup file is deleted (24h). */
	private static final long REMOVE_FILE_INTERVAL = 24L * 3600 * 1000;

	/** Landing-directory path template; expanded per run via FilePathFormatUtil. */
	private final String logPath;

	public FileManagerTask(String logPath)
	{
		this.logPath = logPath;
	}

	/**
	 * Scans the (date-expanded) landing directory and deletes every regular
	 * file whose last modification is older than {@link #REMOVE_FILE_INTERVAL}.
	 * Directories, hidden files and in-use ({@code .tmp}) files are skipped.
	 *
	 * @see java.lang.Runnable#run()
	 */
	@Override
	public void run()
	{
		String fullFilePath = FilePathFormatUtil.escapeString(logPath, true, Calendar.HOUR_OF_DAY, 1);
		logger.info("扫描目录为[" + fullFilePath + "]");
		File spoolDirectory = new File(fullFilePath);
		FileFilter filter = new FileFilter()
		{
			@Override
			public boolean accept(File candidate)
			{
				String fileName = candidate.getName();
				// Skip directories, hidden files, and files still being written.
				return !(candidate.isDirectory() || fileName.startsWith(".") || StringUtils.endsWith(fileName, inUseSuffix));
			}
		};

		// FIX: File.listFiles() returns null when the directory does not exist
		// or is unreadable; the original passed that null to Arrays.asList,
		// which throws NullPointerException.
		File[] candidateFiles = spoolDirectory.listFiles(filter);
		if (candidateFiles == null || candidateFiles.length == 0)
		{
			return;
		}

		long freeSpace = spoolDirectory.getFreeSpace();
		long totalSpace = spoolDirectory.getTotalSpace();
		logger.info("系统空间大小为[" + FormetFileSize(totalSpace) + "]，剩余空间[" + FormetFileSize(freeSpace) + "]");

		long currentTime = System.currentTimeMillis();
		for (File selectFile : candidateFiles)
		{
			logger.info("日志文件[" + selectFile.getAbsolutePath() + "]最近修改日期为[" + selectFile.lastModified() + "],当前日期为[" + currentTime + "]");
			if (currentTime - selectFile.lastModified() > REMOVE_FILE_INTERVAL)
			{
				// FIX: the original ignored delete()'s result and always
				// logged success; report failures instead.
				if (selectFile.delete())
				{
					logger.info("日志文件[" + selectFile.getAbsolutePath() + "]过期，删除成功");
				}
				else
				{
					logger.warn("日志文件[" + selectFile.getAbsolutePath() + "]过期，删除失败");
				}
			}
		}
	}

	/**
	 * Formats a byte count as a human-readable size with two decimals,
	 * using units B / K / M / G (1024-based).
	 *
	 * @param fileS size in bytes
	 * @return formatted size string, e.g. {@code "1.50M"}
	 */
	public static String FormetFileSize(long fileS)
	{
		DecimalFormat df = new DecimalFormat("#.00");
		if (fileS < 1024)
		{
			return df.format((double) fileS) + "B";
		}
		if (fileS < 1048576)
		{
			return df.format((double) fileS / 1024) + "K";
		}
		if (fileS < 1073741824)
		{
			return df.format((double) fileS / 1048576) + "M";
		}
		return df.format((double) fileS / 1073741824) + "G";
	}
}

/**
 * 
 * @description 轮询落地目录，将已生成的数据文件按文件名中的日期上传到hdfs对应目录
 * @author [xuwei3]
 * @version [版本号,2015-7-28]
 * @see [相关类/方法]
 * @since [产品/模块版本]
 */
class HdfsUploadTash implements Runnable
{
	private static final Logger logger = LoggerFactory.getLogger(HdfsUploadTash.class);

	// Locates <install-root>/conf/hadoop.properties, assuming the working
	// directory is <install-root>/bin (the original's layout assumption).
	private String fileSeparator = System.getProperty("file.separator");
	private String currentDir = System.getProperty("user.dir");// 当前路径
	private String currentParentDir = currentDir.substring(0, currentDir.lastIndexOf(fileSeparator));// bin的上级目录
	private String confPath = currentParentDir + fileSeparator + "conf" + fileSeparator + "hadoop.properties";

	// File-name conventions: "<base>_<timestamp>" plus state suffixes.
	private String separator = "_";
	private String completedSuffix = ".done";
	private String templateSuffix = ".tmp";

	private final RealStreamUnit rStreamUnit;

	public HdfsUploadTash(RealStreamUnit rStreamUnit)
	{
		this.rStreamUnit = rStreamUnit;
	}

	/**
	 * Polls the unit's landing directory and moves every finished file to an
	 * HDFS directory derived from the timestamp embedded in its name. Loops
	 * until the thread is interrupted.
	 */
	@Override
	public void run()
	{
		try
		{
			// FIX: the original never closed this stream (resource leak);
			// try-with-resources guarantees it is closed after loading.
			ResourceBundle bundle;
			try (BufferedInputStream inputStream = new BufferedInputStream(new FileInputStream(confPath)))
			{
				bundle = new PropertyResourceBundle(inputStream);
			}

			String logPath = rStreamUnit.getCreateFileDir();
			logger.info("上传文件到hdfs线程启动logPath[" + logPath + "]....................");

			String hdfsLocal = bundle.getString("HdfsLocal");
			String dateFormat = bundle.getString("DateFormat");
			String hdfsUri = bundle.getString("HdfsUri");
			String userName = bundle.getString("hdfsUser");
			String userPrincipal = bundle.getString("userPrincipal");
			String authType = bundle.getString("authType");
			System.setProperty("HADOOP_USER_NAME", userName);

			Preconditions.checkNotNull(hdfsLocal, "落地流数据上传到hdfs线程启动失败，hdfs上传路径不能为空，请检查相关配置");
			Preconditions.checkNotNull(dateFormat, "落地流数据上传到hdfs线程启动失败，日期格式不能为空不能为空，请检查相关配置");
			Preconditions.checkNotNull(hdfsUri, "落地流数据上传到hdfs线程启动失败，hdfsUri不能为空不能为空，请检查相关配置");

			NameNodeInfo namenode = new NameNodeInfo();
			namenode.setHdfsUri(hdfsUri);
			String hdfsUriStandby = bundle.getString("HdfsUriStandby");
			if (StringUtils.isNotEmpty(hdfsUriStandby))
			{
				namenode.setHdfsUriStandby(hdfsUriStandby);
			}

			// Kerberos / simple-auth user info (added 2017-05-14). The default
			// keytab/krb5 locations are only logged here for diagnostics.
			HdfsUserModel userInfo = new HdfsUserModel();
			userInfo.setAuthType(Integer.valueOf(authType));
			userInfo.setUserPrincipal(userPrincipal);
			String keytabFileDefault = System.getProperty("user.home") + System.getProperty("file.separator")
					+ System.getProperty("user.name") + ".keytab";
			String krb5ConfDefault = System.getProperty("user.home") + System.getProperty("file.separator") + "krb5.conf";
			logger.info("keytab_file==>" + keytabFileDefault);
			logger.info("krb5_file==>" + krb5ConfDefault);
			FileSystem filesystem = HdfsUtil.getFileSystem(namenode, userInfo);

			File spoolDirectory = new File(logPath);
			// Accept only regular files that are neither already uploaded
			// (.done) nor still being written (.tmp).
			FileFilter filter = new FileFilter()
			{
				@Override
				public boolean accept(File candidate)
				{
					String fileName = candidate.getName();
					return !(candidate.isDirectory() || fileName.endsWith(completedSuffix) || fileName.endsWith(templateSuffix));
				}
			};

			while (!Thread.currentThread().isInterrupted())
			{
				File[] filterdFiles = spoolDirectory.listFiles(filter);
				if (null == filterdFiles || filterdFiles.length == 0)
				{
					try
					{
						Thread.sleep(1000);
					}
					catch (InterruptedException e)
					{
						// FIX: the original swallowed the interrupt and fell
						// through to iterate a possibly-null array (NPE);
						// restore the flag and stop instead.
						Thread.currentThread().interrupt();
						break;
					}
					continue;
				}

				for (File filterdFile : filterdFiles)
				{
					uploadOne(filesystem, filterdFile, hdfsLocal, dateFormat);
				}
			}
		}
		catch (Exception e)
		{
			logger.error("", e);
		}
		finally
		{
			System.clearProperty("HADOOP_USER_NAME");
		}
	}

	/**
	 * Moves one local file into its date-derived HDFS directory; the source
	 * is deleted after a successful copy. Failures are logged, never thrown.
	 */
	private void uploadOne(FileSystem filesystem, File filterdFile, String hdfsLocal, String dateFormat)
	{
		logger.info("filterdFile[" + filterdFile.getAbsolutePath() + "]");
		String localPath = filterdFile.getAbsolutePath();
		// Target directory comes from the timestamp after the last '_' in the
		// base name, truncated to the configured date-format length.
		Path hdfsPath = new Path(DirParseUtil.parseDataDate2(hdfsLocal,
				StringUtils.substringAfterLast(FilenameUtil.getBaseName(localPath), separator).substring(0, dateFormat.length()),
				dateFormat));
		try
		{
			if (!filesystem.exists(hdfsPath))
			{
				filesystem.mkdirs(hdfsPath);
			}
		}
		catch (Exception e1)
		{
			// FIX: the original swallowed this silently; log so directory
			// problems are visible (the copy below still reports its failure).
			logger.warn("创建hdfs目录失败[" + hdfsPath + "]", e1);
		}
		try
		{
			// 'true' = delete the local file after a successful copy.
			filesystem.copyFromLocalFile(true, new Path(localPath), hdfsPath);
			logger.info("filterdFile[" + filterdFile.getAbsolutePath() + "]结束over");
		}
		catch (Exception e)
		{
			logger.error("", e);
		}
	}
}

