package com.starnet.yarnmonitor.yarn.service.impl;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.starnet.yarnmonitor.security.custom.JwtUserDetailsService;
import com.starnet.yarnmonitor.security.entity.SysUser;
import com.starnet.yarnmonitor.security.mapper.SysUserMapper;
import com.starnet.yarnmonitor.security.utils.UserUtil;
import com.starnet.yarnmonitor.yarn.service.HDFSService;
import com.starnet.yarnmonitor.yarn.conf.HDFSConf;
import com.starnet.yarnmonitor.yarn.entity.FilesInHDFS;
import com.starnet.yarnmonitor.yarn.handler.ApiException;
import com.starnet.yarnmonitor.yarn.mapper.FileMapper;
import com.starnet.yarnmonitor.yarn.utils.PageUtils;
import org.apache.hadoop.fs.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.core.userdetails.User;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import java.util.regex.Pattern;

@Service(value = "hdfsService")
public class HDFSServiceImpl implements HDFSService {

    /**
     * Upload path must look like "/dir/sub/": word characters between slashes.
     * Compiled once instead of re-compiling via Pattern.matches on every upload.
     */
    private static final Pattern UPLOAD_PATH_PATTERN = Pattern.compile("(/(\\w)*)*/");

    @Autowired
    FileSystem myHDFS;
    @Autowired
    FileMapper fileMapper;
    @Autowired
    HDFSConf hdfsConf;
    @Autowired
    SysUserMapper sysUserMapper;

    /**
     * Uploads a multipart file to HDFS under the configured prefix and records
     * its metadata in the database.
     *
     * @param multipartFile the uploaded file
     * @param path          target directory ("/a/b/" form); null means the root
     * @throws IOException  on HDFS failures
     * @throws ApiException if the path is malformed, the target already exists,
     *                      or a database write fails
     */
    @Transactional
    @Override
    public void uploadFile(MultipartFile multipartFile, String path) throws IOException {
        // Normalize the target directory: default to "/", force leading/trailing slash.
        if (path == null) {
            path = "/";
        } else {
            if (!path.startsWith("/")) {
                path = "/" + path;
            }
            if (!path.endsWith("/")) {
                path = path + "/";
            }
        }
        // Reject anything that is not /word/word/... segments.
        if (!UPLOAD_PATH_PATTERN.matcher(path).matches()) {
            throw new ApiException("路径参数错误,路径请满足(/(\\w)*)*/");
        }

        String fileName = multipartFile.getOriginalFilename();
        // Guard: a missing original filename would silently produce a ".../null" path.
        if (fileName == null || fileName.isEmpty()) {
            throw new ApiException("上传失败");
        }
        // Relative path used for the HDFS write.
        String filePath = hdfsConf.getFileRelativePath() + path + fileName;
        // Refuse to overwrite an existing file at the same path.
        if (myHDFS.exists(new Path(filePath))) {
            throw new ApiException("该路径下已存在该文件，请更换路径");
        }

        // Stream the upload into HDFS. try-with-resources closes both streams even
        // if the copy fails (the original leaked them on exception), and transferTo
        // avoids buffering the whole file in memory like readAllBytes did.
        try (InputStream is = multipartFile.getInputStream();
             FSDataOutputStream os = myHDFS.create(new Path(filePath))) {
            is.transferTo(os);
        }

        // Read back the stored file's status for the DB record.
        // NOTE(review): the write above uses getFileRelativePath() while this read
        // uses getFileAbsolutePath() — presumably both resolve to the same HDFS
        // location; confirm against HDFSConf.
        FileStatus[] fileStatuses = myHDFS.listStatus(new Path(hdfsConf.getFileAbsolutePath() + path + fileName));
        if (fileStatuses.length == 0) {
            throw new ApiException("上传失败");
        }
        FileStatus fileStatus = fileStatuses[0];

        // Database row describing the stored file.
        FilesInHDFS filesInHDFS = new FilesInHDFS();
        filesInHDFS.setFileName(fileName);
        filesInHDFS.setSize(fileStatus.getLen());
        filesInHDFS.setPath(path + fileName);
        filesInHDFS.setReplication((int) fileStatus.getReplication());
        filesInHDFS.setEncrypted(fileStatus.isEncrypted());
        filesInHDFS.setErasureCoded(fileStatus.isErasureCoded());
        // Record the uploading user and the upload time.
        filesInHDFS.setUserId(UserUtil.getCurrentUserId());
        filesInHDFS.setCreatedTime(Timestamp.valueOf(LocalDateTime.now()));

        // If the HDFS file was deleted out-of-band, the DB may still hold a stale
        // row for this path; remove it before inserting the fresh record.
        LambdaQueryWrapper<FilesInHDFS> queryWrapper = new LambdaQueryWrapper<>();
        queryWrapper.eq(FilesInHDFS::getPath, path + fileName);
        FilesInHDFS staleRow = fileMapper.selectOne(queryWrapper);
        if (staleRow != null && fileMapper.deleteById(staleRow.getId()) != 1) {
            // message typo fixed: 清楚 -> 清除
            throw new ApiException("数据库清除无用路径文件失败");
        }

        if (fileMapper.insert(filesInHDFS) != 1) {
            throw new ApiException("上传失败，写入数据库异常");
        }
    }

    /**
     * Streams the file with the given database id to the HTTP response.
     * Ordinary users may only download their own files; a super user may
     * download anyone's.
     *
     * @param id       database id of the file record
     * @param response servlet response the file content is written to
     * @throws IOException  on HDFS or response-stream failures
     * @throws ApiException if the record is missing or the user is not allowed
     */
    @Override
    public void downloadFile(Long id, HttpServletResponse response) throws IOException {
        FilesInHDFS filesInHDFS = fileMapper.selectById(id);
        if (filesInHDFS == null) {
            throw new ApiException("不存在该文件");
        }
        boolean isSuper = UserUtil.isSuper();
        int currentUserId = UserUtil.getCurrentUserId();
        // Only a super user may download someone else's file.
        if ((currentUserId != filesInHDFS.getUserId()) && !isSuper) {
            throw new ApiException("普通用户只能下载自己上传的文件");
        }

        // Absolute HDFS location = configured prefix + stored relative path.
        String absolutePath = hdfsConf.getFileAbsolutePath() + filesInHDFS.getPath();
        response.setContentType("application/octet-stream");
        // Fix: the original header value was just "filename=..."; a valid
        // Content-Disposition needs the disposition type first.
        response.addHeader("Content-Disposition", "attachment;filename=" + filesInHDFS.getFileName());

        // try-with-resources closes both streams even when the copy fails;
        // transferTo streams the file instead of loading it fully into memory.
        try (FSDataInputStream inputStream = myHDFS.open(new Path(absolutePath));
             ServletOutputStream outputStream = response.getOutputStream()) {
            inputStream.transferTo(outputStream);
        }
    }

    /**
     * Pages over the files uploaded by the current user.
     *
     * @param cur  1-based page number
     * @param size page size
     * @return the populated page of this user's file records
     */
    @Override
    public IPage<FilesInHDFS> listYourFiles(Integer cur, Integer size) {
        IPage<FilesInHDFS> iPage = PageUtils.getIPage(cur, size, FilesInHDFS.class);
        // Restrict the query to rows owned by the current user.
        LambdaQueryWrapper<FilesInHDFS> queryWrapper = new LambdaQueryWrapper<>();
        queryWrapper.eq(FilesInHDFS::getUserId, UserUtil.getCurrentUserId());
        fileMapper.selectPage(iPage, queryWrapper);
        return iPage;
    }

    /**
     * Pages over every recorded file, regardless of owner.
     *
     * @param cur  1-based page number
     * @param size page size
     * @return the populated page of all file records
     */
    @Override
    public IPage<FilesInHDFS> listAllFiles(Integer cur, Integer size) {
        IPage<FilesInHDFS> iPage = PageUtils.getIPage(cur, size, FilesInHDFS.class);
        // No wrapper: select all rows.
        fileMapper.selectPage(iPage, null);
        return iPage;
    }

    /**
     * Deletes a file (database row and HDFS object) by database id, then prunes
     * directories left empty by the removal. Ordinary users may only delete
     * their own files. Runs in a transaction so a failed HDFS delete rolls the
     * database delete back.
     *
     * @param id database id of the file record
     * @throws IOException  on HDFS failures
     * @throws ApiException if the record/file is missing, the user is not
     *                      allowed, or a delete fails
     */
    @Transactional
    @Override
    public void deleteFileById(Long id) throws IOException {
        // The record must exist in the database...
        FilesInHDFS filesInHDFS = fileMapper.selectById(id);
        if (filesInHDFS == null) {
            throw new ApiException("此id文件不存在");
        }
        // ...and the file must still exist in HDFS.
        Path hdfsPath = new Path(hdfsConf.getFileAbsolutePath() + filesInHDFS.getPath());
        if (!myHDFS.exists(hdfsPath)) {
            throw new ApiException("此id文件不存在");
        }
        boolean isSuper = UserUtil.isSuper();
        int currentUserId = UserUtil.getCurrentUserId();
        // Only a super user may delete someone else's file.
        if ((currentUserId != filesInHDFS.getUserId()) && !isSuper) {
            throw new ApiException("普通用户不能删除别人的文件");
        }

        // Delete the database row first; the HDFS delete below throwing on
        // failure makes the transaction roll this back.
        if (fileMapper.deleteById(id) != 1) {
            throw new ApiException("删除失败");
        }
        // Delete the HDFS object (non-recursive: it is a single file).
        if (!myHDFS.delete(hdfsPath, false)) {
            throw new ApiException("删除失败");
        }
        // Prune ancestor directories the delete may have left empty.
        deleteUselessDirectory(filesInHDFS.getPath());
    }

    /***
     * Recursively removes empty ancestor directories of the given file path,
     * stopping at (and never deleting) the configured root prefix.
     *
     * Fixes two defects in the original: a leftover System.out debug print, and
     * a StringIndexOutOfBoundsException once the recursion reached the root —
     * for a top-level file the parent became "", and "".lastIndexOf("/") == -1
     * made the next substring(0, -1) throw.
     *
     * @param path 删除的文件全路径,包含文件名 (relative, starts with "/")
     * @throws IOException on HDFS failures
     */
    public void deleteUselessDirectory(String path) throws IOException {
        // "" or "/" means we have climbed to the root prefix: stop.
        if (path.isEmpty() || path.equals("/")) {
            return;
        }
        int lastSlash = path.lastIndexOf('/');
        if (lastSlash <= 0) {
            // Parent is the root prefix itself; leave it in place.
            return;
        }
        // Truncate to the parent directory.
        String parent = path.substring(0, lastSlash);
        Path parentPath = new Path(hdfsConf.getFileAbsolutePath() + parent);
        // If the parent holds nothing else, delete it and keep climbing.
        if (myHDFS.listStatus(parentPath).length == 0) {
            myHDFS.delete(parentPath, false);
            deleteUselessDirectory(parent);
        }
    }
}
