package com.imooc.bigdata.hos.server.filemgr.service;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;

import org.apache.commons.io.FileExistsException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Service;

import com.imooc.bigdata.hos.core.HosConfiguration;

/**
 * Created by jixin on 18-3-15.
 */
@Service("hdfsServiceImpl")
public class HdfsServiceImpl implements IHdfsService {

  private static final Logger logger = Logger.getLogger(HdfsServiceImpl.class);

  private final FileSystem fileSystem;

  /*
   * Block-size policy: callers of this service store files larger than ~20 MB in
   * HDFS. The HDFS default block size is 128 MB, so for files no larger than
   * 64 MB we halve the block size to 64 MB instead of reserving a full default
   * block.
   */
  private final long defaultBlockSize = 128 * 1024 * 1024;
  private final long initBlockSize = defaultBlockSize / 2;


  /**
   * Builds the HDFS {@link FileSystem} client from the Hadoop site files.
   *
   * <p>The configuration directory is resolved in order: the
   * {@code HADOOP_CONF_DIR} environment variable, the {@code HADOOP_CONF_DIR}
   * JVM system property, then the application's own {@code hadoop.conf.dir}
   * setting.
   *
   * @throws FileNotFoundException if no usable configuration directory is found
   * @throws Exception if the HDFS URI is malformed or the client cannot connect
   */
  public HdfsServiceImpl() throws Exception {
    String confDir = System.getenv("HADOOP_CONF_DIR");
    if (confDir == null) {
      confDir = System.getProperty("HADOOP_CONF_DIR");
    }
    // Always load the application configuration: it is needed below for
    // "hadoop.uri" regardless of where the conf dir came from. (Previously it
    // was only loaded in the fallback branch, causing an NPE whenever
    // HADOOP_CONF_DIR was set.)
    HosConfiguration hosConfiguration = HosConfiguration.getConfiguration();
    if (confDir == null) {
      confDir = hosConfiguration.getString("hadoop.conf.dir");
    }
    // Guard against all three sources being absent before touching the path.
    if (confDir == null || !new File(confDir).exists()) {
      throw new FileNotFoundException("hadoop conf dir not found: " + confDir);
    }

    // TODO: validate that the URI is well-formed before handing it to the client.
    String hdfsUri = hosConfiguration.getString("hadoop.uri");

    // Build a FileSystem instance from the cluster's site files.
    Configuration conf = new Configuration();
    conf.addResource(new Path(confDir + "/core-site.xml"));
    conf.addResource(new Path(confDir + "/hdfs-site.xml"));
    fileSystem = FileSystem.get(new URI(hdfsUri), conf);
  }

  /**
   * Writes {@code input} to HDFS as {@code dir}/{@code name}, creating the
   * directory first if it does not exist. Files no larger than 64 MB are
   * written with a 64 MB block size; larger files use the 128 MB default.
   *
   * @param dir         target HDFS directory
   * @param name        file name within {@code dir}
   * @param input       content to store; always closed by this method
   * @param length      content length in bytes, used only to choose the block size
   * @param replication HDFS replication factor for the new file
   * @throws IOException if the directory cannot be created or the write fails
   */
  @Override
  public void saveFile(String dir, String name,
                        InputStream input, long length, short replication)
          throws IOException {
    // Ensure the parent directory exists before writing.
    Path dirPath = new Path(dir);
    try {
      if (!fileSystem.exists(dirPath)) {
        boolean created = fileSystem.mkdirs(dirPath, FsPermission.getDirDefault());
        logger.info("create dir " + dirPath + " success: " + created);
        if (!created) {
          throw new IOException("dir create failed:" + dir);
        }
      }
    } catch (FileExistsException ex) {
      // A concurrent writer created the directory first; safe to continue.
      logger.warn("dir already exists: " + dir, ex);
    }

    Path path = new Path(dir + "/" + name);
    // Small files get the half-size block; see the block-size policy note above.
    long blockSize = length <= initBlockSize ? initBlockSize : defaultBlockSize;
    // try-with-resources closes BOTH streams even if one close() throws
    // (the old finally block leaked the output stream when input.close() failed).
    try (InputStream in = input;
         FSDataOutputStream outputStream =
             fileSystem.create(path, true, 512 * 1024, replication, blockSize)) {
      fileSystem.setPermission(path, FsPermission.getFileDefault());
      byte[] buffer = new byte[512 * 1024];
      int len;
      while ((len = in.read(buffer)) > 0) {
        outputStream.write(buffer, 0, len);
      }
    }
  }

  /**
   * Deletes the file {@code dir}/{@code name}. Non-recursive: only the single
   * file is removed.
   *
   * @throws IOException if the delete fails
   */
  @Override
  public void deleteFile(String dir, String name) throws IOException {
    // false = do not delete recursively; the target is expected to be a file.
    fileSystem.delete(new Path(dir + "/" + name), false);
  }

  /**
   * Opens {@code dir}/{@code name} for reading.
   *
   * @return an input stream over the file's content; the caller must close it
   * @throws IOException if the file cannot be opened
   */
  @Override
  public InputStream openFile(String dir, String name) throws IOException {
    return fileSystem.open(new Path(dir + "/" + name));
  }

  /**
   * Creates {@code dir} (and any missing parents) in HDFS.
   *
   * <p>NOTE(review): the method name "mikDir" is a typo for "mkDir", but it is
   * part of the {@code IHdfsService} contract and cannot be renamed here
   * without updating the interface and all callers.
   *
   * @throws IOException if the directory cannot be created
   */
  @Override
  public void mikDir(String dir) throws IOException {
    fileSystem.mkdirs(new Path(dir));
  }

  /**
   * Deletes {@code dir} and everything beneath it.
   *
   * @throws IOException if the delete fails
   */
  @Override
  public void deleteDir(String dir) throws IOException {
    // true = recursive delete of the whole subtree.
    this.fileSystem.delete(new Path(dir), true);
  }
}
