package com.zyl.file.platform;

import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.HttpMethod;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ListPartsRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PartListing;
import com.amazonaws.services.s3.model.PartSummary;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper;
import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.SftpATTRS;
import com.jcraft.jsch.SftpException;
import com.zyl.file.constant.StateConstants;
import com.zyl.file.dto.SplitUploadDTO;
import com.zyl.file.dto.SplitUploadRecordDTO;
import com.zyl.file.exception.MyException;
import com.zyl.file.mapper.StEquipmentMapper;
import com.zyl.file.mapper.StFileMapper;
import com.zyl.file.po.StEquipment;
import com.zyl.file.po.StFile;
import com.zyl.file.result.ResultCode;
import com.zyl.file.util.AssertUtils;
import com.zyl.file.util.FileCheckUtils;
import com.zyl.file.util.FtpUtils;
import com.zyl.file.vo.SplitUploadVO;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.MediaType;
import org.springframework.http.MediaTypeFactory;

import javax.annotation.Resource;
import java.io.File;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Amazon-S3-compatible implementation of {@link FileStorage} that supports
 * chunked (multipart) uploads via pre-signed URLs, resumable upload state
 * tracking in the {@code st_file} table, and instant-upload (秒传) detection.
 *
 * @author zyl
 * @since 2024/1/18 17:11
 */
@Slf4j
public class AmazonS3FileStorage extends FileStorage {

    @Resource
    private StEquipmentMapper stEquipmentMapper;

    /**
     * One S3 client per storage equipment, keyed by equipment id.
     * NOTE(review): the map is declared with String keys, but lookups below pass
     * {@code getEquipmentId()} directly — confirm the equipment id type matches
     * the key type, otherwise {@code get} silently returns {@code null}.
     */
    @Resource
    private Map<String, AmazonS3> amazonS3ClientMap;

    @Resource
    private StFileMapper stFileMapper;

    /** Pre-signed URL expiration time in milliseconds (10 minutes). */
    public static final Long PRE_SIGN_URL_EXPIRE = 60 * 10 * 1000L;

    @Value("${upload.chunk-size}")
    private Long chunkSize;
    @Value("${upload.lan-proxy-ip}")
    private String lanProxyIp;
    @Value("${upload.lan-proxy-port}")
    private String lanProxyPort;
    @Value("${upload.wan-proxy-ip}")
    private String wanProxyIp;
    @Value("${upload.wan-proxy-port}")
    private String wanProxyPort;

    /**
     * Initializes a multipart upload task: allocates an S3 uploadId, persists a
     * tracking record and returns the task descriptor to the client.
     *
     * @param param  upload parameters (file name, size, chunk size, MD5 identifier, equipment id)
     * @param userId id of the uploading user, stored as the record creator
     * @return an unfinished {@link SplitUploadDTO} carrying the persisted task record
     * @throws MyException if a file with the same identifier has already been registered
     */
    @Override
    public SplitUploadDTO initTask(SplitUploadVO param, Long userId) {
        StFile byIdentifier = getByIdentifier(param.getIdentifier());
        if (!ObjectUtil.isNull(byIdentifier)) {
            throw new MyException(ResultCode.PARAM_ERROR, "本次上传有内容一样的文件，只能成功上传其中一个");
        }
        StFile task = new StFile();
        String fileName = param.getFileName();
        String fileName1 = IdUtil.randomUUID();
        // NOTE(review): if fileName has no '.', lastIndexOf returns -1 and the
        // whole name becomes the "suffix" — confirm upstream validation guarantees an extension.
        String suffix = fileName.substring(fileName.lastIndexOf(".") + 1);
        Date currentDate = new Date();
        // The equipment id determines which storage backend / bucket to use.
        StEquipment stEquipment = stEquipmentMapper.selectById(param.getEquipmentId());
        String bucketName = stEquipment.getBucket();
        // Object key: current date as folder name + random UUID file name.
        // Fixed: pattern must be calendar year "yyyy", not week-year "YYYY",
        // which yields the wrong year for dates near the year boundary.
        String key = StrUtil.format("{}/{}.{}", DateUtil.format(currentDate, "yyyy-MM-dd"), fileName1, suffix);
        // Resolve the Content-Type from the object key, defaulting to octet-stream.
        String contentType = MediaTypeFactory.getMediaType(key).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentType(contentType);
        // Initiate the multipart upload against the equipment-specific S3 client.
        InitiateMultipartUploadResult initiateMultipartUploadResult = null;
        try {
            initiateMultipartUploadResult = amazonS3ClientMap.get(param.getEquipmentId())
                    .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, key).withObjectMetadata(objectMetadata));
        } catch (AmazonServiceException e) {
            handleAmazonServiceException(e);
        }
        // NOTE(review): this dereference NPEs if AssertUtils.isTrue does not throw
        // inside handleAmazonServiceException — confirm AssertUtils semantics.
        String uploadId = initiateMultipartUploadResult.getUploadId();
        task.setUrl(getPath(stEquipment.getStorageAddress(), bucketName, key));
        int chunkNum = (int) Math.ceil(param.getTotalSize() * 1.0 / param.getChunkSize());
        task.setBucketName(bucketName)
                .setChunkNum(chunkNum)
                .setChunkSize(param.getChunkSize())
                .setSize(param.getTotalSize())
                .setFileMd5(param.getIdentifier())
                .setOriginalFilename(fileName)
                .setFilename(fileName1)
                .setExt(suffix)
                .setFilePath(bucketName + "/" + key)
                .setObjectKey(key)
                .setCreateUser(userId)
                .setUploadId(uploadId)
                .setBusiBatchNo(param.getBusiBatchNo())
                .setSourceFileMd5(param.getSourceFileMd5())
                .setEquipmentId(param.getEquipmentId());

        stFileMapper.insert(task);
        SplitUploadRecordDTO dto = new SplitUploadRecordDTO();
        BeanUtil.copyProperties(task, dto);
        return new SplitUploadDTO()
                .setFinished(false)
                .setTaskRecord(dto)
                .setPath(getPath(stEquipment.getStorageAddress(), bucketName, key));
    }

    /**
     * Looks up the state of an in-progress upload task by its MD5 identifier.
     * <p>
     * Returns {@code null} when no task exists (or the stale uploadId was
     * purged), a finished DTO when the object already exists in S3 (instant
     * upload), or an unfinished DTO listing the parts already uploaded.
     *
     * @param identifier file MD5 identifying the task
     * @param isEncrypt  encryption flag (currently unused by this implementation)
     * @return the task state, or {@code null} if the caller must re-initialize
     */
    @Override
    public SplitUploadDTO getTaskInfo(String identifier, Integer isEncrypt) {
        long l = System.currentTimeMillis();

        StFile task = getByIdentifier(identifier);
        long l1 = System.currentTimeMillis();
        // Fixed: timing output went to System.out; use debug-level logging instead.
        log.debug("getTaskInfo lookup took {} ms", l1 - l);
        if (task == null) {
            return null;
        }
        SplitUploadRecordDTO dto = new SplitUploadRecordDTO();
        BeanUtil.copyProperties(task, dto);
        // The equipment id determines which storage backend / bucket to use.
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        SplitUploadDTO result = new SplitUploadDTO().setFinished(true).setTaskRecord(dto).setPath(getPath(stEquipment.getStorageAddress(), task.getBucketName(), task.getObjectKey()));
        long l2 = System.currentTimeMillis();
        log.debug("getTaskInfo equipment query took {} ms", l2 - l1);
        if (StateConstants.COMMON_ONE.equals(task.getIsUploadOk())) {
            return result;
        }
        // If the object already exists on any storage device, skip re-uploading
        // (instant upload / 秒传).
        boolean doesObjectExist = false;
        try {
            doesObjectExist = amazonS3ClientMap.get(task.getEquipmentId()).doesObjectExist(task.getBucketName(), task.getObjectKey());
        } catch (AmazonServiceException e) {
            handleAmazonServiceException(e);
        }
        if (!doesObjectExist) {
            // Upload incomplete: return the parts already uploaded so the client can resume.
            try {
                ListPartsRequest listPartsRequest = new ListPartsRequest(task.getBucketName(), task.getObjectKey(), task.getUploadId());
                PartListing partListing = amazonS3ClientMap.get(task.getEquipmentId()).listParts(listPartsRequest);
                result.setFinished(false).getTaskRecord().setExitPartList(partListing.getParts());
            } catch (AmazonS3Exception e) {
                // The uploadId is stale on the server side: drop the record so the
                // caller re-initializes with a fresh uploadId.
                log.info("{}上传ID有问题，重新创建上传ID重新上传", task.getUploadId());
                stFileMapper.deleteById(task.getId());
                return null;
            }
        }
        long l3 = System.currentTimeMillis();
        // Fixed: this step was mislabeled "2" (duplicate of the previous one).
        log.debug("getTaskInfo part listing took {} ms", l3 - l2);
        return result;
    }

    /**
     * Generates a pre-signed PUT URL for uploading one part, then rewrites the
     * host:port to the appropriate proxy (LAN when {@code type} is "1", WAN otherwise).
     *
     * @param identifier file MD5 identifying the task
     * @param partNumber 1-based part number being uploaded
     * @param type       network type selector: "1" = LAN proxy, anything else = WAN proxy
     * @param isOpen     open-access flag (currently unused by this implementation)
     * @return the proxied pre-signed URL
     */
    @Override
    public String genPreSignUploadUrl(String identifier, Integer partNumber, String type, Boolean isOpen) {
        AssertUtils.isNull(type, "type不能为空");
        StFile task = getByIdentifier(identifier);
        AssertUtils.isNull(task, "分片任务不存在");
        // The equipment id determines which storage backend / bucket to use.
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        String newIP;
        String newPort;
        if ("1".equals(type)) {
            newIP = lanProxyIp;
            newPort = lanProxyPort;
        } else {
            newIP = wanProxyIp;
            newPort = wanProxyPort;
        }
        Map<String, String> params = new HashMap<>();
        params.put("partNumber", partNumber.toString());
        params.put("uploadId", task.getUploadId());
        Date currentDate = new Date();
        Date expireDate = DateUtil.offsetMillisecond(currentDate, PRE_SIGN_URL_EXPIRE.intValue());
        GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(task.getBucketName(), task.getObjectKey())
                .withExpiration(expireDate).withMethod(HttpMethod.PUT);
        // Fixed: removed dead null-check on the locally constructed map.
        params.forEach(request::addRequestParameter);
        URL preSignedUrl = null;
        try {
            preSignedUrl = amazonS3ClientMap.get(task.getEquipmentId()).generatePresignedUrl(request);
        } catch (AmazonServiceException e) {
            handleAmazonServiceException(e);
        }
        String url = preSignedUrl.toString();
        // Swap the storage endpoint for the proxy endpoint visible to the client.
        return url.replace(preSignedUrl.getHost() + ":" + preSignedUrl.getPort(), newIP + ":" + newPort);
    }

    /**
     * Completes the multipart upload: verifies all parts are present, asks S3 to
     * merge them, and marks the tracking record as uploaded.
     *
     * @param identifier file MD5 identifying the task
     * @param isFlat     deviation-correction flag; must not be {@code null}
     * @return the completed task record, or an empty {@link StFile} if the object
     *         already existed (merge already done)
     * @throws RuntimeException if the task is missing or parts are incomplete
     */
    @Override
    public StFile merge(String identifier, Boolean isFlat) {
        long start = System.currentTimeMillis();
        AssertUtils.isNull(isFlat, "isFlat：是否偏离矫正判断条件不能为空");
        StFile task = getByIdentifier(identifier);

        if (task == null) {
            // Fixed: message had a typo ("不存" -> "不存在").
            throw new RuntimeException("分片任务不存在");
        }
        // The equipment id determines which storage backend / bucket to use.
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        // If the object already exists the merge was already completed; return early.
        boolean doesObjectExist = false;
        try {
            doesObjectExist = amazonS3ClientMap.get(task.getEquipmentId()).doesObjectExist(task.getBucketName(), task.getObjectKey());
        } catch (AmazonServiceException e) {
            handleAmazonServiceException(e);
        }
        if (doesObjectExist) {
            return new StFile();
        }
        ListPartsRequest listPartsRequest = new ListPartsRequest(task.getBucketName(), task.getObjectKey(), task.getUploadId());
        PartListing partListing = amazonS3ClientMap.get(task.getEquipmentId()).listParts(listPartsRequest);
        List<PartSummary> parts = partListing.getParts();
        if (!task.getChunkNum().equals(parts.size())) {
            // Uploaded part count disagrees with the record; refuse to merge.
            throw new RuntimeException("分片缺失，请重新上传");
        }
        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withUploadId(task.getUploadId())
                .withKey(task.getObjectKey())
                .withBucketName(task.getBucketName())
                .withPartETags(parts.stream().map(partSummary -> new PartETag(partSummary.getPartNumber(), partSummary.getETag())).collect(Collectors.toList()));
        amazonS3ClientMap.get(task.getEquipmentId()).completeMultipartUpload(completeMultipartUploadRequest);
        long end1 = System.currentTimeMillis();
        log.info("（minio）合并文件的操作耗时：{}(毫秒)", end1 - start);
        stFileMapper.update(null, new UpdateWrapper<StFile>()
                .set("is_upload_ok", StateConstants.COMMON_ONE)
                .eq("id", task.getId()));
        long end = System.currentTimeMillis();
        log.info("合并文件总耗时（合并+合并后操作）：{}(毫秒)", end - start);
        return task;
    }

    /**
     * Translates an {@link AmazonServiceException} into a user-facing failure.
     * Centralizes the catch body that was previously copy-pasted four times.
     * NOTE(review): assumes AssertUtils.isTrue(true, msg) throws — confirm,
     * otherwise callers proceed with a null result and NPE.
     */
    private void handleAmazonServiceException(AmazonServiceException e) {
        if (e.getStatusCode() == 404 && "NoSuchBucket".equals(e.getErrorCode())) {
            log.error("存储桶不存在，连接失败。");
            AssertUtils.isTrue(true, "存储桶不存在，连接失败。");
        } else {
            log.error("连接出错：" + e.getMessage());
            AssertUtils.isTrue(true, "连接出错，请核对相关配置");
        }
    }

    /** No resources owned by this instance; nothing to release. */
    @Override
    public void close() throws Exception {

    }
}
