package com.ls.fw.hadoop.client.base.impl;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.io.IOUtils;
import org.mortbay.jetty.InclusiveByteRange;

import com.ls.fw.hadoop.client.base.BaseHadoopDao;
import com.ls.fw.hadoop.client.bean.Datanode;
import com.ls.fw.hadoop.client.bean.FileTreeItem;
import com.ls.fw.hadoop.client.bean.Filter;
import com.ls.fw.hadoop.client.bean.HadoopFileStatus;
import com.ls.fw.hadoop.client.bean.Namenode;
import com.ls.fw.hadoop.client.bean.PageBean;
import com.ls.fw.hadoop.client.constant.Constant;
import com.ls.fw.hadoop.client.constant.StatusType;
import com.ls.fw.hadoop.client.exception.HadoopException;
import com.ls.fw.hadoop.client.util.DateTimeUtil;

public class BaseHadoopDaoImpl implements BaseHadoopDao {

	final Log LOG = LogFactory.getLog(BaseHadoopDaoImpl.class);
	public static final String CONTENT_LENGTH = "Content-Length";
	public final static int BUFFER = 4096;

	// Base HDFS URI, e.g. "hdfs://host:port"; prepended to paths in the
	// FileContext-based helpers and stripped from returned item paths.
	private String hdfsURL = "";
	private Configuration conf = new HdfsConfiguration();
	private FileContext fc = null;
	// Fix: this field was eagerly set to an *uninitialized*
	// DistributedFileSystem, which made the null-check in getDFS() a no-op —
	// the no-arg constructor handed out a DFS that initialize() was never
	// called on. Start at null so getDFS()/switchHDFS() perform the lazy init.
	private DistributedFileSystem dfs = null;
	private FileSystem fs = null;
	private DFSClient dfsClient = null;

	/** No-arg constructor; all clients are created lazily from {@link #conf}. */
	public BaseHadoopDaoImpl() {
	}

	/**
	 * Creates the DAO and eagerly connects to the given HDFS.
	 *
	 * @param hdfsURL base HDFS URI used for all subsequent operations
	 */
	public BaseHadoopDaoImpl(String hdfsURL) {
		this.hdfsURL = hdfsURL;
		this.switchHDFS(hdfsURL);
	}

	/**
	 * Copies a local file into HDFS.
	 *
	 * @param srcPath  local source file path
	 * @param destPath HDFS destination path (created/overwritten)
	 * @throws HadoopException wrapping any failure during the copy
	 */
	@Override
	public void uploadFile(String srcPath, String destPath)
			throws HadoopException {
		InputStream in = null;
		OutputStream out = null;
		try {
			FileSystem fs = this.getFileSystem();
			out = fs.create(new Path(destPath));
			in = new BufferedInputStream(new FileInputStream(srcPath));
			// close=false: the finally block below owns the stream lifecycle.
			// (Fix: the original passed close=true AND closed again in finally,
			// double-closing both streams.)
			IOUtils.copyBytes(in, out, BUFFER, false);
		} catch (Exception e) {
			throw new HadoopException(e.getMessage(), e);
		} finally {
			if (out != null) {
				try {
					out.close();
				} catch (IOException ignored) {
				}
			}
			if (in != null) {
				try {
					in.close();
				} catch (IOException ignored) {
				}
			}
		}
	}

	/**
	 * Renames {@code srcPath} to {@code destPath} on the given file system.
	 * The rename is only attempted when the source exists and the destination
	 * does not; otherwise {@code false} is returned.
	 *
	 * @param srcPath  existing path
	 * @param destPath target path (must not already exist)
	 * @param fs       file system to operate on (not closed by this method)
	 * @return true when the rename was attempted and succeeded
	 * @throws Exception propagated from the underlying file system
	 */
	@Override
	public boolean rename(String srcPath, String destPath, FileSystem fs)
			throws Exception {
		// Fix: the original wrapped this in two catch-and-rethrow blocks plus
		// an empty finally, none of which added anything.
		boolean flag = false;
		Path src = new Path(srcPath);
		Path dst = new Path(destPath);
		if (isExist(srcPath, fs) && !isExist(destPath, fs)) {
			flag = fs.rename(src, dst);
		}
		return flag;
	}

	/**
	 * Renames a path using the DAO's cached file system.
	 *
	 * @param srcPath  existing path
	 * @param destPath target path (must not already exist)
	 * @return true when the rename was attempted and succeeded
	 * @throws Exception propagated from the underlying file system
	 */
	@Override
	public boolean rename(String srcPath, String destPath) throws Exception {
		FileSystem fileSystem = this.getFileSystem();
		return rename(srcPath, destPath, fileSystem);
	}

	/**
	 * Returns the {@link FileStatus} of the given path.
	 *
	 * @param srcPath path to stat (qualified against the default file system)
	 * @return status of the path
	 * @throws IOException if the path does not exist or the RPC fails
	 */
	@Override
	public FileStatus getFileStatus(String srcPath) throws IOException {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		FileSystem fs = this.getFileSystem();
		final Path listpath = fs.makeQualified(new Path(srcPath));
		return fs.getFileStatus(listpath);
	}

	/**
	 * Deletes a single file (non-recursive).
	 *
	 * @param srcPath file to delete
	 * @return true when the delete succeeded
	 * @throws IOException on RPC failure (e.g. non-empty directory)
	 */
	@Override
	public boolean deleteFile(String srcPath) throws IOException {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		FileSystem fs = this.getFileSystem();
		return fs.delete(new Path(srcPath), false);
	}

	/**
	 * Deletes a directory.
	 *
	 * @param srcPath   directory to delete
	 * @param recursive when true, children are deleted as well
	 * @return true when the delete succeeded
	 * @throws IOException on RPC failure
	 */
	@Override
	public boolean deleteDir(String srcPath, boolean recursive)
			throws IOException {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		FileSystem fs = this.getFileSystem();
		final Path listpath = fs.makeQualified(new Path(srcPath));
		return fs.delete(listpath, recursive);
	}

	/**
	 * Recursively deletes a path on the given file system.
	 *
	 * @param srcPath path to delete (directories are removed with children)
	 * @param fs      file system to operate on (not closed by this method)
	 * @return true when the delete succeeded
	 * @throws IOException on RPC failure
	 */
	@Override
	public boolean delete(String srcPath, FileSystem fs) throws IOException {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		final Path listpath = fs.makeQualified(new Path(srcPath));
		return fs.delete(listpath, true);
	}

	/**
	 * Recursively deletes a path using the DAO's cached file system.
	 *
	 * @param srcPath path to delete
	 * @return true when the delete succeeded
	 * @throws IOException on RPC failure
	 */
	@Override
	public boolean delete(String srcPath) throws IOException {
		FileSystem fileSystem = this.getFileSystem();
		return delete(srcPath, fileSystem);
	}

	/**
	 * Copies a local file into HDFS via {@link FileSystem#copyFromLocalFile}.
	 *
	 * Fix: the original closed {@code fs} in its finally block. The instance
	 * passed by the other overload is the shared, cached one returned by
	 * getFileSystem()/FileSystem.get(), so closing it here broke every
	 * subsequent operation on this DAO. The caller now retains ownership of
	 * {@code fs}.
	 *
	 * @param srcPath  local source path
	 * @param destPath HDFS destination path
	 * @param fs       file system to copy into (no longer closed here)
	 * @throws IOException on copy failure
	 */
	@Override
	public void copyFile(String srcPath, String destPath, FileSystem fs)
			throws IOException {
		Path src = new Path(srcPath);
		Path dst = new Path(destPath);
		fs.copyFromLocalFile(src, dst);
	}

	/**
	 * Copies a local file into HDFS using the DAO's cached file system.
	 *
	 * @param srcPath  local source path
	 * @param destPath HDFS destination path
	 * @throws IOException on copy failure
	 */
	@Override
	public void copyFile(String srcPath, String destPath) throws IOException {
		FileSystem fileSystem = this.getFileSystem();
		copyFile(srcPath, destPath, fileSystem);
	}

	/**
	 * Lists the top-level directory entries of the default file system.
	 *
	 * @return root items (the reserved system directory is excluded)
	 * @throws IOException on RPC failure
	 */
	public List<FileTreeItem> getRootDirectory() throws IOException {
		FileSystem fileSystem = this.getFileSystem();
		return getRootDirectory(fileSystem);
	}

	/**
	 * Lists the top-level directory entries via {@link FileContext}, skipping
	 * the directory named {@link Constant#except_dirName}.
	 *
	 * @return items for "/", or null when the listing yielded nothing
	 * @throws Exception propagated from the FileContext RPCs
	 */
	public List<FileTreeItem> getRootDirectoryByFC() throws Exception {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		final Path listpath = getFC().makeQualified(
				new Path(this.hdfsURL + "/"));
		RemoteIterator<FileStatus> lists = getFC().listStatus(listpath);
		if (lists == null) {
			return null;
		}
		List<FileTreeItem> list = new ArrayList<FileTreeItem>();
		while (lists.hasNext()) {
			FileStatus fileStatus = lists.next();
			// Hide the reserved/system directory from the tree.
			if (fileStatus.isDirectory()
					&& fileStatus.getPath().getName().equals(
							Constant.except_dirName)) {
				continue;
			}
			// Strip the scheme/authority prefix so paths are cluster-relative.
			list.add(new FileTreeItem(fileStatus.getPath().getName(),
					fileStatus.getPath().toString().replace(this.hdfsURL, ""),
					fileStatus.isDirectory()));
		}
		return list;
	}

	/**
	 * Lists the top-level entries of the given file system, skipping the
	 * directory named {@link Constant#except_dirName}.
	 *
	 * @param fs file system to query (not closed by this method)
	 * @return items for "/", or null when the listing yielded nothing
	 * @throws IOException propagated from the file-system RPCs
	 */
	public List<FileTreeItem> getRootDirectory(FileSystem fs)
			throws IOException {
		// Fix: removed the rethrow-only catch, the dead `if (fs != null) {}`
		// finally block, and the commented-out code.
		final Path listpath = fs.makeQualified(new Path("/"));
		FileStatus[] fileStatuss = fs.listStatus(listpath);
		if (fileStatuss == null) {
			return null;
		}
		List<FileTreeItem> list = new ArrayList<FileTreeItem>(fileStatuss.length);
		for (FileStatus fileStatus : fileStatuss) {
			// Hide the reserved/system directory from the tree.
			if (fileStatus.isDirectory()
					&& fileStatus.getPath().getName().equals(
							Constant.except_dirName)) {
				continue;
			}
			// Strip the scheme/authority prefix so paths are cluster-relative.
			list.add(new FileTreeItem(fileStatus.getPath().getName(),
					fileStatus.getPath().toString().replace(this.hdfsURL, ""),
					fileStatus.isDirectory()));
		}
		return list;
	}

	/**
	 * Pages through a directory using the DAO's cached file system.
	 *
	 * @param dirPath  directory to list
	 * @param page     1-based page number
	 * @param pageSize entries per page (non-positive means all)
	 * @return page items
	 * @throws Exception propagated from the listing
	 */
	public List<FileTreeItem> getFiles(String dirPath, int page, int pageSize)
			throws Exception {
		FileSystem fileSystem = this.getFileSystem();
		return getFiles(dirPath, page, pageSize, fileSystem);
	}

	/**
	 * Returns one page of the entries under a directory as tree items.
	 *
	 * @param dirPath  directory to list
	 * @param page     1-based page number (values <= 0 are treated as 1)
	 * @param pageSize entries per page; non-positive means "all entries"
	 * @param fs       file system to query (not closed by this method)
	 * @return page items, plus a trailing {@link Constant#MORE} marker when
	 *         more entries remain; null when the listing yielded nothing
	 * @throws Exception propagated from the file-system RPCs
	 */
	public List<FileTreeItem> getFiles(String dirPath, int page, int pageSize,
			FileSystem fs) throws Exception {
		FileStatus[] fileStatus = null;
		try {
			final Path listpath = fs.makeQualified(new Path(dirPath));
			fileStatus = fs
					.listStatus(listpath, new Filter(page, pageSize, ""));
		} catch (Exception e) {
			throw e;
		} finally {
			if (fs != null) {

			}
		}
		List<FileTreeItem> list = null;
		if (fileStatus != null) {
			int allRow = fileStatus.length;
			if (pageSize <= 0) {// non-positive page size means "everything"
				pageSize = allRow;
			}
			int endIndex = allRow;
			if (page <= 0) {
				page = 1;
			}
			// starting offset of the requested page
			int offset = PageBean.countOffset(pageSize, page);
			if (pageSize > 0) {
				endIndex = offset + pageSize;
				list = new ArrayList<FileTreeItem>(pageSize);
			} else {
				list = new ArrayList<FileTreeItem>();
			}
			// out-of-range page: fall back to the first page
			if (offset >= allRow) {
				page = 1;
				offset = 0;
				endIndex = offset + pageSize;
			}

			for (int i = offset; (i < allRow) && (i < endIndex); i++) {
				list
						.add(new FileTreeItem(
								fileStatus[i].getPath().getName(),
								fileStatus[i].getPath().toString().replace(
										this.hdfsURL, ""), fileStatus[i]
										.isDirectory()));
			}
			// append a "more" marker when further pages exist
			if (allRow > endIndex) {
				list.add(new FileTreeItem(Constant.MORE, Constant.MORE
						.toString(), false));
			}
		}

		return list;
	}

	/**
	 * Lists every entry under a directory (no paging).
	 *
	 * @param dirPath directory to list
	 * @return all items under the directory
	 * @throws Exception propagated from the listing
	 */
	public List<FileTreeItem> getFiles(String dirPath) throws Exception {
		// pageSize -1 is treated as "no limit" by the paging overload
		return getFiles(dirPath, 1, -1);
	}

	/**
	 * Raw {@link FileStatus} listing of a directory using the cached file
	 * system.
	 *
	 * @param dirPath directory to list
	 * @return statuses of all children
	 * @throws IOException on RPC failure
	 */
	public FileStatus[] getChildFile(String dirPath) throws IOException {
		FileSystem fileSystem = this.getFileSystem();
		return getChildFile(dirPath, fileSystem);
	}

	/**
	 * Raw {@link FileStatus} listing of a directory.
	 *
	 * @param dirPath directory to list
	 * @param fs      file system to query (not closed by this method)
	 * @return statuses of all children
	 * @throws IOException on RPC failure
	 */
	public FileStatus[] getChildFile(String dirPath, FileSystem fs)
			throws IOException {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		final Path listpath = fs.makeQualified(new Path(dirPath));
		return fs.listStatus(listpath);
	}

	/**
	 * Lists children of {@code dirPath} through the paging/filtering
	 * {@link Filter}.
	 *
	 * Fix: the original silently ignored the {@code fs} argument and always
	 * re-fetched {@code this.getFileSystem()}; the supplied file system is
	 * now actually used. (The 4-arg overload passes the cached one, so its
	 * behavior is unchanged.)
	 *
	 * @param dirPath   directory to search
	 * @param filterStr substring handed to the {@link Filter}
	 * @param page      1-based page number
	 * @param pageSize  entries per page
	 * @param fs        file system to query (not closed by this method)
	 * @return matching statuses
	 * @throws Exception propagated from the listing
	 */
	public FileStatus[] findFile(String dirPath, final String filterStr,
			int page, int pageSize, FileSystem fs) throws Exception {
		final Path listpath = fs.makeQualified(new Path(dirPath));
		return fs.listStatus(listpath, new Filter(page, pageSize, filterStr));
	}

	/**
	 * Lists a directory through {@link FileContext}.
	 *
	 * NOTE(review): despite the signature, {@code filterStr}, {@code page}
	 * and {@code pageSize} are ignored — the full, unfiltered iterator is
	 * returned. Confirm whether callers expect filtering/paging here.
	 *
	 * @param dirPath directory path relative to {@link #hdfsURL}
	 * @return remote iterator over all entries
	 * @throws Exception propagated from the FileContext RPCs
	 */
	public RemoteIterator<FileStatus> findFileByFC(String dirPath,
			final String filterStr, int page, int pageSize) throws Exception {
		RemoteIterator<FileStatus> lists = null;
		try {
			final Path listpath = getFC().makeQualified(
					new Path(hdfsURL + dirPath));
			lists = getFC().listStatus(listpath);
		} catch (Exception e) {
			throw e;
		} finally {
		}

		return lists;
	}

	/**
	 * Builds a {@link PageBean} for a directory listing.
	 *
	 * NOTE(review): {@code listStatus()} and {@code getTotalNum()} are
	 * currently stubbed (returning null/0 below in this file), so this
	 * produces an empty page until those are restored. {@code name} is
	 * accepted but unused — preserved for interface compatibility.
	 *
	 * @param path     directory to list
	 * @param name     unused filter name
	 * @param page     1-based page number (values <= 0 are treated as 1)
	 * @param pageSize entries per page; non-positive means unbounded
	 * @return populated page bean
	 * @throws Exception propagated from the underlying listing calls
	 */
	public PageBean query(String path, String name, int page, int pageSize)
			throws Exception {
		FileStatus[] lists = this.listStatus(path, page, pageSize);
		int allRow = this.getTotalNum(path);
		List<HadoopFileStatus> list = null;
		if (lists != null) {
			// Fix: the original performed this normalization twice in a row.
			if (page <= 0) {
				page = 1;
			}
			// Starting offset — only used for the out-of-range check below;
			// `lists` is assumed to be pre-paginated by listStatus().
			int offset = PageBean.countOffset(pageSize, page);
			if (pageSize > 0) {
				list = new ArrayList<HadoopFileStatus>(pageSize);
			} else {
				list = new ArrayList<HadoopFileStatus>();
			}
			if (offset >= allRow) {
				page = 1;
				offset = 0;
			}
			for (FileStatus fileStatus : lists) {
				list.add(new HadoopFileStatus(this.getHdfsURL(),
						fileStatus.getPath().getName(),
						fileStatus.getPath().toString().replace(this.hdfsURL, ""),
						fileStatus.isDirectory(), fileStatus.getLen(),
						fileStatus.getReplication(), fileStatus.getBlockSize(),
						(String) DateTimeUtil.getFormatDate(fileStatus
								.getModificationTime()),
						fileStatus.getPermission().toString(),
						fileStatus.getOwner(), fileStatus.getGroup()));
			}
		}
		return PageBean.getInstance(page, pageSize, allRow, list);
	}

	/**
	 * Pages through a directory listing and maps each entry to a
	 * {@link HadoopFileStatus}.
	 *
	 * @param dirPath  directory path relative to {@link #hdfsURL}
	 * @param page     1-based page number (values <= 0 are treated as 1)
	 * @param pageSize entries per page; non-positive means "no limit"
	 * @return the requested page, or null when the listing was unavailable
	 * @throws Exception propagated from the FileContext RPCs
	 */
	public List<HadoopFileStatus> findHadoopFileStatus(String dirPath,
			int page, int pageSize) throws Exception {
		final Path listpath = getFC().makeQualified(
				new Path(hdfsURL + dirPath));
		RemoteIterator<FileStatus> lists = getFC().listStatus(listpath);
		if (lists == null) {
			return null;
		}
		if (page <= 0) {
			page = 1;
		}
		// Starting offset of the requested page.
		int offSet = PageBean.countOffset(pageSize, page);
		if (offSet < 0) {
			offSet = 0;
		}
		List<HadoopFileStatus> list = pageSize > 0
				? new ArrayList<HadoopFileStatus>(pageSize)
				: new ArrayList<HadoopFileStatus>();
		int size = 0; // entries collected so far
		int currentOffset = 0; // entries skipped so far
		while (lists.hasNext()) {
			// Fix: the original tested size >= pageSize unconditionally, so a
			// non-positive pageSize ("no limit", per the ArrayList branch
			// above) broke out immediately and returned an empty list.
			if (pageSize > 0 && size >= pageSize) {
				break;
			}
			FileStatus fileStatus = lists.next();
			if (currentOffset < offSet) {
				currentOffset++; // still inside the skipped prefix
				continue;
			}
			list.add(new HadoopFileStatus(this.getHdfsURL(),
					fileStatus.getPath().getName(),
					fileStatus.getPath().toString().replace(this.hdfsURL, ""),
					fileStatus.isDirectory(), fileStatus.getLen(),
					fileStatus.getReplication(), fileStatus.getBlockSize(),
					(String) DateTimeUtil.getFormatDate(fileStatus
							.getModificationTime()),
					fileStatus.getPermission().toString(),
					fileStatus.getOwner(), fileStatus.getGroup()));
			size++;
		}
		return list;
	}

	/**
	 * Pages through a directory listing and maps each entry to a
	 * {@link FileTreeItem}.
	 *
	 * @param dirPath  directory path relative to {@link #hdfsURL}
	 * @param page     1-based page number (values <= 0 are treated as 1)
	 * @param pageSize entries per page; non-positive means "no limit"
	 * @return the requested page, or null when the listing was unavailable
	 * @throws Exception propagated from the FileContext RPCs
	 */
	public List<FileTreeItem> findFileTreeItem(String dirPath, int page,
			int pageSize) throws Exception {
		final Path listpath = getFC().makeQualified(
				new Path(hdfsURL + dirPath));
		RemoteIterator<FileStatus> lists = getFC().listStatus(listpath);
		if (lists == null) {
			return null;
		}
		if (page <= 0) {
			page = 1;
		}
		// Starting offset of the requested page.
		int offSet = PageBean.countOffset(pageSize, page);
		if (offSet < 0) {
			offSet = 0;
		}
		List<FileTreeItem> list = pageSize > 0
				? new ArrayList<FileTreeItem>(pageSize)
				: new ArrayList<FileTreeItem>();
		int size = 0; // entries collected so far
		int currentOffset = 0; // entries skipped so far
		while (lists.hasNext()) {
			// Fix: the original tested size >= pageSize unconditionally, so a
			// non-positive pageSize ("no limit") returned an empty list.
			if (pageSize > 0 && size >= pageSize) {
				break;
			}
			FileStatus fileStatus = lists.next();
			if (currentOffset < offSet) {
				currentOffset++; // still inside the skipped prefix
				continue;
			}
			list.add(new FileTreeItem(fileStatus.getPath().getName(),
					fileStatus.getPath().toString().replace(this.hdfsURL, ""),
					fileStatus.isDirectory()));
			size++;
		}
		return list;
	}

	/**
	 * Pages through a directory listing, returning the raw
	 * {@link FileStatus} entries.
	 *
	 * @param dirPath  directory path relative to {@link #hdfsURL}
	 * @param page     1-based page number (values <= 0 are treated as 1)
	 * @param pageSize entries per page; non-positive means "no limit"
	 * @return the requested page, or null when the listing was unavailable
	 * @throws Exception propagated from the FileContext RPCs
	 */
	public List<FileStatus> findFileStatusByFC(String dirPath, int page,
			int pageSize) throws Exception {
		final Path listpath = getFC().makeQualified(
				new Path(hdfsURL + dirPath));
		RemoteIterator<FileStatus> lists = getFC().listStatus(listpath);
		if (lists == null) {
			return null;
		}
		if (page <= 0) {
			page = 1;
		}
		// Starting offset of the requested page.
		int offSet = PageBean.countOffset(pageSize, page);
		if (offSet < 0) {
			offSet = 0;
		}
		List<FileStatus> list = pageSize > 0
				? new ArrayList<FileStatus>(pageSize)
				: new ArrayList<FileStatus>();
		int size = 0; // entries collected so far
		int currentOffset = 0; // entries skipped so far
		while (lists.hasNext()) {
			// Fix: the original tested size >= pageSize unconditionally, so a
			// non-positive pageSize ("no limit") returned an empty list.
			if (pageSize > 0 && size >= pageSize) {
				break;
			}
			FileStatus fileStatus = lists.next();
			if (currentOffset < offSet) {
				currentOffset++; // still inside the skipped prefix
				continue;
			}
			list.add(fileStatus);
			size++;
		}
		return list;
	}

	/**
	 * Finds files matching {@code filterStr} using the cached file system.
	 *
	 * @param dirPath   directory to search
	 * @param filterStr substring handed to the paging {@link Filter}
	 * @param page      1-based page number
	 * @param pageSize  entries per page
	 * @return matching statuses
	 * @throws Exception propagated from the listing
	 */
	public FileStatus[] findFile(String dirPath, final String filterStr,
			int page, int pageSize) throws Exception {
		FileSystem fileSystem = this.getFileSystem();
		return findFile(dirPath, filterStr, page, pageSize, fileSystem);
	}

	/**
	 * Lists children of {@code dirPath}, EXCLUDING entries whose name
	 * contains {@code filterStr}. A null/blank filter returns the full
	 * listing.
	 *
	 * @param dirPath   directory to list
	 * @param filterStr substring to exclude by
	 * @return statuses whose names do not contain the filter
	 * @throws Exception propagated from the listing
	 */
	public FileStatus[] filterFile(String dirPath, final String filterStr)
			throws Exception {
		// Fix: removed the rethrow-only catch/empty finally, and replaced the
		// obscure `indexOf(filterStr) <= -1` test with String.contains.
		FileSystem fs = this.getFileSystem();
		final Path listpath = fs.makeQualified(new Path(dirPath));
		if (filterStr == null || "".equals(filterStr.trim())) {
			return fs.listStatus(listpath);
		}
		return fs.listStatus(listpath, new PathFilter() {
			public boolean accept(Path path) {
				// Keep only names that do NOT contain the filter string.
				return !path.getName().contains(filterStr);
			}
		});
	}

	/**
	 * Streams {@code inStream} into a new HDFS file at {@code savePath}.
	 * The input stream is NOT closed by this method (caller owns it).
	 *
	 * @param inStream source of the bytes to store
	 * @param savePath HDFS destination path
	 * @return the destination path on success, or "error:&lt;message&gt;" on
	 *         failure — errors are logged and reported in-band rather than
	 *         thrown (preserved from the original contract)
	 */
	public String upload(InputStream inStream, String savePath) {
		String dstPath = "";
		FSDataOutputStream oStream = null;
		try {
			FileSystem fs = this.getFileSystem();
			dstPath = savePath;
			oStream = fs.create(new Path(dstPath));
			// Fix: reuse one buffer — the original re-allocated a fresh 1 KiB
			// array on every pass through the loop.
			byte[] buffer = new byte[1024];
			int length;
			while ((length = inStream.read(buffer)) > 0) {
				oStream.write(buffer, 0, length);
			}
			oStream.flush();
		} catch (Exception ex) {
			dstPath = "error:" + ex.getMessage();
			this.LOG.info("", ex);
		} finally {
			if (oStream != null) {
				try {
					oStream.close();
				} catch (IOException e) {
					// log instead of printStackTrace()
					this.LOG.info("", e);
				}
			}
		}
		return dstPath;
	}

	/**
	 * Checks whether a path exists on the given file system.
	 *
	 * @param path path to test
	 * @param hdfs file system to query (not closed by this method)
	 * @return true when the path exists
	 * @throws IOException on RPC failure
	 */
	@Override
	public boolean isExist(String path, FileSystem hdfs) throws IOException {
		return hdfs.exists(new Path(path));
	}

	/**
	 * Copies a local file into HDFS via {@link FileSystem#copyFromLocalFile}.
	 *
	 * NOTE(review): the existence pre-check asks the *HDFS* FileSystem about
	 * {@code srcPath}, even though srcPath is a local file; when it is absent
	 * there the copy is silently skipped. Behavior preserved — confirm the
	 * intent with callers.
	 *
	 * @param srcPath  local source path
	 * @param destPath HDFS destination path
	 * @throws HadoopException wrapping any failure
	 */
	@Override
	public void uploadFileFromLocalFile(String srcPath, String destPath)
			throws HadoopException {
		// Fix: removed the empty finally block.
		try {
			FileSystem fs = this.getFileSystem();
			if (isExist(srcPath, fs)) {
				fs.copyFromLocalFile(new Path(srcPath), new Path(destPath));
			}
		} catch (Exception e) {
			throw new HadoopException(e.getMessage(), e);
		}
	}

	/**
	 * Downloads an HDFS file to the local file system. Silently does nothing
	 * when the source path does not exist.
	 *
	 * @param srcPath  HDFS source path
	 * @param destPath local destination path
	 * @param fs       file system to read from (not closed by this method)
	 * @throws HadoopException wrapping any failure
	 */
	@Override
	public void downFile(String srcPath, String destPath, FileSystem fs)
			throws HadoopException {
		try {
			if (!isExist(srcPath, fs)) {
				return;
			}
			Path source = new Path(srcPath);
			Path target = new Path(destPath);
			fs.copyToLocalFile(source, target);
		} catch (Exception e) {
			throw new HadoopException(e.getMessage(), e);
		}
	}

	/**
	 * Streams an HDFS file into a local file. Silently does nothing when the
	 * source path does not exist.
	 *
	 * @param srcPath  HDFS source path
	 * @param destPath local destination path (created/overwritten)
	 * @throws HadoopException wrapping any I/O failure
	 */
	@Override
	public void downFileToLocal(String srcPath, String destPath)
			throws HadoopException {
		OutputStream out = null;
		FSDataInputStream hdfsInStream = null;
		try {
			FileSystem fs = this.getFileSystem();
			if (isExist(srcPath, fs)) {
				hdfsInStream = fs.open(new Path(srcPath));
				out = new FileOutputStream(destPath);
				// Fix: reuse one buffer — the original re-allocated a fresh
				// 1 KiB array on every pass through the loop.
				byte[] ioBuffer = new byte[1024];
				int readLen;
				while ((readLen = hdfsInStream.read(ioBuffer)) > 0) {
					out.write(ioBuffer, 0, readLen);
				}
			}
		} catch (Exception e) {
			throw new HadoopException(e.getMessage(), e);
		} finally {
			// Best-effort close of both streams.
			if (out != null) {
				try {
					out.close();
				} catch (Exception ignored) {
				}
			}
			if (hdfsInStream != null) {
				try {
					hdfsInStream.close();
				} catch (Exception ignored) {
				}
			}
		}
	}

	/**
	 * Downloads an HDFS file using the DAO's cached file system.
	 *
	 * @param srcPath  HDFS source path
	 * @param destPath local destination path
	 * @throws HadoopException wrapping any failure
	 */
	@Override
	public void downFile(String srcPath, String destPath)
			throws HadoopException {
		FileSystem fileSystem = this.getFileSystem();
		downFile(srcPath, destPath, fileSystem);
	}

	/**
	 * Opens an output stream that writes a new HDFS file at {@code path}.
	 * The caller owns the returned stream and must close it.
	 *
	 * @param path HDFS file path to create
	 * @return open output stream positioned at the start of the new file
	 * @throws Exception propagated from the file system
	 */
	@Override
	public OutputStream getOutputStream(String path) throws Exception {
		// Fix: removed the catch-and-rethrow plus empty finally boilerplate.
		FileSystem fs = this.getFileSystem();
		return fs.create(new Path(path));
	}

	/**
	 * Paged directory listing.
	 *
	 * NOTE(review): stubbed — the commented-out implementation relied on a
	 * listStatus(path, page, pageSize) overload not present in stock
	 * DistributedFileSystem. Callers (e.g. query()) currently always
	 * receive null; confirm before relying on this method.
	 */
	@Override
	public FileStatus[] listStatus(String path, int page, int pageSize)
			throws IOException {
//		final Path listpath = getDFS().makeQualified(new Path(path));
//		return getDFS().listStatus(listpath, page, pageSize);
		return null;
	}

	/**
	 * Total number of entries under a directory.
	 *
	 * NOTE(review): stubbed — the commented-out implementation relied on a
	 * getTotalNum(path) method not present in stock DistributedFileSystem.
	 * Callers currently always receive 0; confirm before relying on this.
	 */
	@Override
	public int getTotalNum(String dirPath) throws IOException {
//		DistributedFileSystem dfs = getDFS();
//		final Path listpath = dfs.makeQualified(new Path(dirPath));
//		return dfs.getTotalNum(listpath);
		return 0;
	}

	/**
	 * Streams an HDFS file to an HTTP response, honoring a single Range
	 * header (partial content) when one is present.
	 *
	 * @param request  incoming request; only its "Range" headers are read
	 * @param response response the file bytes are written to
	 * @param path     HDFS file path to send
	 * @param name     filename placed in the Content-Disposition header
	 * @throws HadoopException wrapping any I/O failure
	 */
	@Override
	@SuppressWarnings("unchecked")
	public void down(HttpServletRequest request, HttpServletResponse response,
			String path, String name) throws HadoopException {

		DFSInputStream in = null;
		OutputStream out = null;
		// Treat an empty Range enumeration the same as no Range header.
		Enumeration<String> reqRanges = request.getHeaders("Range");
		if (reqRanges != null && !reqRanges.hasMoreElements()) {
			reqRanges = null;
		}
		try {
			in = getDfsClient().open(path);
			out = response.getOutputStream();
			final long fileLen = in.getFileLength();
			if (reqRanges != null) {
				// Range present: delegate 206/416 handling to sendPartialData.
				List<InclusiveByteRange> ranges = InclusiveByteRange
						.satisfiableRanges(reqRanges, fileLen);
				this.sendPartialData(in, out, response, fileLen, ranges);
			} else {
				// No ranges, so send entire file
				response.setHeader("Content-Disposition",
						"attachment; filename=\"" + name + "\"");
				response.setContentType("application/octet-stream");
				response.setHeader(CONTENT_LENGTH, "" + fileLen);
				this.copyFromOffset(in, out, 0L, fileLen);
			}
		} catch (IOException e) {
			throw new HadoopException(e.getMessage(), e);
		} finally {
			// Best-effort close of both streams; failures are ignored.
			if (in != null) {
				try {
					in.close();
					in = null;
				} catch (Exception e) {
				}
			}
			if (out != null) {
				try {
					out.close();
					out = null;
				} catch (Exception e) {
				}
			}
		}
	}

	/**
	 * Send a partial content response with the given range. If there are no
	 * satisfiable ranges, or if multiple ranges are requested, which is
	 * unsupported, respond with range not satisfiable.
	 * 
	 * @param in
	 *            stream to read from
	 * @param out
	 *            stream to write to
	 * @param response
	 *            http response to use
	 * @param contentLength
	 *            for the response header
	 * @param ranges
	 *            to write to respond with
	 * @throws IOException
	 *             on error sending the response
	 */
	private void sendPartialData(FSInputStream in, OutputStream out,
			HttpServletResponse response, long contentLength,
			List<InclusiveByteRange> ranges) throws IOException {
		if (ranges == null || ranges.size() != 1) {
			// Zero or multiple ranges: answer 416 with a Content-Range that
			// reports the full length, per RFC 7233.
			response.setContentLength(0);
			response
					.setStatus(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE);
			response.setHeader("Content-Range", InclusiveByteRange
					.to416HeaderRangeString(contentLength));
		} else {
			// Exactly one satisfiable range: answer 206 with just that slice.
			InclusiveByteRange singleSatisfiableRange = ranges.get(0);
			long singleLength = singleSatisfiableRange.getSize(contentLength);
			response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
			response.setHeader("Content-Range", singleSatisfiableRange
					.toHeaderRangeString(contentLength));
			copyFromOffset(in, out, singleSatisfiableRange
					.getFirst(contentLength), singleLength);
		}
	}

	/**
	 * Copies {@code count} bytes starting at {@code offset} from {@code in}
	 * to {@code out}; neither stream is closed (close=false).
	 */
	private void copyFromOffset(FSInputStream in, OutputStream out,
			long offset, long count) throws IOException {
		in.seek(offset);
		IOUtils.copyBytes(in, out, count, false);
	}

	/**
	 * Lazily creates the cached {@link FileContext} bound to the current
	 * configuration.
	 *
	 * @return shared FileContext instance
	 */
	@Override
	public synchronized FileContext getFC() {
		if (fc != null) {
			return fc;
		}
		try {
			fc = FileContext.getFileContext(conf);
		} catch (UnsupportedFileSystemException e) {
			throw new HadoopException(e.getMessage(), e);
		}
		return fc;
	}

	/**
	 * Lazily creates and initializes the cached DistributedFileSystem.
	 *
	 * Fix: the original assigned the field BEFORE initialize(); when
	 * initialization failed, a half-initialized instance stayed cached and
	 * every later call skipped initialize(). The field is now only set after
	 * a successful init, so a failed attempt can be retried.
	 *
	 * @return shared, initialized DFS instance
	 * @throws HadoopException when initialization fails
	 */
	@Override
	public synchronized DistributedFileSystem getDFS() throws HadoopException {
		if (dfs == null) {
			DistributedFileSystem created = new DistributedFileSystem();
			try {
				created.initialize(URI.create(hdfsURL), conf);
			} catch (IOException e) {
				throw new HadoopException(e.getMessage(), e);
			}
			dfs = created;
		}
		return this.dfs;
	}

	/**
	 * Lazily creates the cached {@link FileSystem} for {@link #hdfsURL}.
	 *
	 * @return shared FileSystem instance
	 * @throws HadoopException when the file system cannot be created
	 */
	@Override
	public synchronized FileSystem getFS() throws HadoopException {
		if (fs != null) {
			return fs;
		}
		try {
			fs = FileSystem.get(URI.create(hdfsURL), conf);
		} catch (IOException e) {
			throw new HadoopException(e.getMessage(), e);
		}
		return fs;
	}

	/**
	 * Lazily creates the cached {@link DFSClient} for {@link #hdfsURL}.
	 *
	 * @return shared DFSClient instance
	 * @throws HadoopException when the client cannot be created
	 */
	@Override
	public synchronized DFSClient getDfsClient() throws HadoopException {
		if (dfsClient != null) {
			return dfsClient;
		}
		try {
			dfsClient = new DFSClient(URI.create(hdfsURL), conf);
		} catch (IOException e) {
			throw new HadoopException(e.getMessage(), e);
		}
		return dfsClient;
	}

	/**
	 * Lazily creates the cached {@link FileSystem} for {@link #hdfsURL}.
	 *
	 * Fix: now synchronized, matching {@link #getFS()} — both methods guard
	 * the same {@code fs} field, and this unsynchronized version raced with
	 * getFS()/switchHDFS()/close().
	 *
	 * @return shared FileSystem instance (do not close it directly; use
	 *         {@link #close()})
	 * @throws HadoopException when the file system cannot be created
	 */
	public synchronized FileSystem getFileSystem() throws HadoopException {
		if (fs == null) {
			try {
				fs = FileSystem.get(URI.create(this.hdfsURL), conf);
			} catch (Exception e) {
				throw new HadoopException(e.getMessage(), e);
			}
		}
		return fs;
	}

	/**
	 * Points this DAO at a (possibly different) HDFS and rebuilds all cached
	 * clients against the updated configuration.
	 *
	 * @param hdfsUrl new base URI; blank/null keeps the current URL but the
	 *                clients are still re-initialized
	 * @throws HadoopException when any client fails to (re)connect
	 */
	@Override
	public void switchHDFS(String hdfsUrl)  throws HadoopException{
		synchronized (this) {
			if(StringUtils.isNotBlank(hdfsUrl)){
				hdfsURL = hdfsUrl;
				// Set both the legacy and the current default-FS config keys.
				conf.set("fs.default.name", hdfsURL);
				conf.set("fs.defaultFS", hdfsURL);
			}
			try {
				if (fc == null) {
					fc = FileContext.getFileContext(conf);
				}
				if (dfs == null) {
					dfs = new DistributedFileSystem();
				}
				dfs.initialize(URI.create(hdfsURL), conf);

				// Close the old FileSystem before replacing it; close errors
				// are deliberately swallowed (best effort).
				try {
					if (fs != null) {
						fs.close();
					}
				} catch (Exception e) {
				}
				fs = null;
				fs = FileSystem.get(URI.create(hdfsURL), conf);
				// Same best-effort replacement for the DFSClient.
				try {
					if (dfsClient != null) {
						dfsClient.close();
					}
				} catch (Exception e) {
				}
				dfsClient = null;
				dfsClient = new DFSClient(URI.create(hdfsURL), conf);
			} catch (UnsupportedFileSystemException e) {
				throw new HadoopException(e.getMessage(), e);
			} catch (IOException e) {
				throw new HadoopException(e.getMessage(), e);
			}
		}
	}

	/**
	 * Closes and discards every cached client (FileSystem, DFS, DFSClient).
	 * Each close is best-effort and logged; safe to call repeatedly.
	 */
	@Override
	public void close() {
		synchronized (this) {
			try {
				if (fs != null) {
					fs.close();
				}
			} catch (Exception e) {
				// log instead of printStackTrace()
				LOG.info("", e);
			}
			fs = null;
			try {
				if (dfs != null) {
					dfs.close();
				}
			} catch (IOException e) {
				LOG.info("", e);
			}
			dfs = null;
			// Fix: the original closed `fs` a second time here — after it had
			// already been closed and nulled above, the duplicated block was
			// dead code and has been removed.
			try {
				if (dfsClient != null) {
					dfsClient.close();
				}
			} catch (IOException e) {
				LOG.info("", e);
			}
			dfsClient = null;
		}
	}

	/**
	 * Sets the base HDFS URI. Note: unlike {@link #switchHDFS(String)}, this
	 * does NOT rebuild the cached clients; it only affects clients created
	 * after this call.
	 */
	public void setHdfsURL(String hdfsURL) {
		this.hdfsURL = hdfsURL;
	}

	/** @return the base HDFS URI this DAO talks to */
	public String getHdfsURL() {
		return hdfsURL;
	}

	/**
	 * Builds a {@link Namenode} summary from the cluster status.
	 *
	 * Fix: the original called {@code dfs.getStatus()} three times — three
	 * separate RPCs for one snapshot; the status is now fetched once.
	 *
	 * @return namenode descriptor (capacity/used/remaining in bytes)
	 * @throws IOException on RPC failure
	 */
	@Override
	public  Namenode getNamenodeList( ) throws IOException{
		DistributedFileSystem dfs = getDFS();
		final org.apache.hadoop.fs.FsStatus status = dfs.getStatus();
		return new Namenode(false, dfs.getCanonicalServiceName(),
				status.getCapacity(), status.getUsed(), status.getRemaining(),
				0, "", 0, dfs.getUri().getHost(), "");
	}
	/**
	 * Fetches datanode descriptors filtered by status.
	 *
	 * @param status live, dead, or all — any other value behaves like all
	 *               (live nodes first, then dead), matching the original
	 *               {@code -1:all, 0:dead, 1:live} convention
	 * @return datanodes matching the requested status
	 * @throws IOException on RPC failure
	 */
	@Override
	public  List<Datanode> getDatanodeList(StatusType status) throws IOException{
		DistributedFileSystem dfs = getDFS();
		List<Datanode> list = new ArrayList<Datanode>();
		// Anything that is neither `dead` nor `live` is treated as `all`.
		boolean all = status == StatusType.all
				|| (status != StatusType.dead && status != StatusType.live);
		if (all || status == StatusType.live) {
			addDatanodes(list, dfs.getDataNodeStats(DatanodeReportType.LIVE),
					false);
		}
		if (all || status == StatusType.dead) {
			addDatanodes(list, dfs.getDataNodeStats(DatanodeReportType.DEAD),
					true);
		}
		return list;
	}

	/**
	 * Maps raw {@link DatanodeInfo} entries into {@link Datanode} beans and
	 * appends them to {@code list}. (Extracted from four copy-pasted loops.)
	 */
	private void addDatanodes(List<Datanode> list, DatanodeInfo[] nodes,
			boolean dead) {
		if (nodes == null) {
			return;
		}
		for (DatanodeInfo info : nodes) {
			list.add(new Datanode(dead, info.getName(), info.getCapacity(),
					info.getDfsUsed(), info.getRemaining(),
					info.getBlockPoolUsed(),
					DateTimeUtil.getFormatDate(info.getLastUpdate()),
					info.getXceiverCount(), info.getHostName(), ""));
		}
	}

	/**
	 * Swaps in a new configuration and reconnects to the given HDFS.
	 *
	 * @param hdfsUrl new base URI (blank keeps the current one)
	 * @param conf    replacement Hadoop configuration, installed before the
	 *                clients are rebuilt
	 * @throws HadoopException when reconnection fails
	 */
	@Override
	public void switchConf(String hdfsUrl,Configuration conf) throws HadoopException {
		this.conf = conf;
		switchHDFS(hdfsUrl);
	}
}
