package com.zyl.file.service.impl;

import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.lang.UUID;
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.HttpMethod;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ListPartsRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PartListing;
import com.amazonaws.services.s3.model.PartSummary;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import com.baomidou.lock.annotation.Lock4j;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper;
import com.zyl.file.constant.StateConstants;
import com.zyl.file.dto.ProgressMessageDTO;
import com.zyl.file.dto.SplitUploadDTO;
import com.zyl.file.dto.SplitUploadRecordDTO;
import com.zyl.file.dto.StFileDTO;
import com.zyl.file.exception.MyException;
import com.zyl.file.mapper.StEquipmentMapper;
import com.zyl.file.mapper.StFileMapper;
import com.zyl.file.po.StEquipment;
import com.zyl.file.po.StFile;
import com.zyl.file.result.Result;
import com.zyl.file.result.ResultCode;
import com.zyl.file.service.LocalSplitFileService;
import com.zyl.file.util.AssertUtils;
import com.zyl.file.util.DateUtils;
import com.zyl.file.util.FileCheckUtils;
import com.zyl.file.util.FtpUtils;
import com.zyl.file.util.JsonUtils;
import com.zyl.file.util.Md5Utils;
import com.zyl.file.vo.SplitUploadVO;
import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.SftpATTRS;
import com.jcraft.jsch.SftpException;
import com.zyl.file.service.SplitUploadTaskService;
import com.zyl.file.websocket.WebSocketFrameHandler;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.MediaType;
import org.springframework.http.MediaTypeFactory;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.CollectionUtils;

import javax.annotation.Resource;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.SequenceInputStream;
import java.net.URL;
import java.nio.file.Files;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Vector;
import java.util.stream.Collectors;

/**
 * 分片上传
 *
 * @author zyl
 * @Description
 * @since 2023/7/20 10:05
 */
@Service
@Slf4j
public class SplitUploadTaskServiceImpl implements SplitUploadTaskService {

    // Presigned-URL expiry window in milliseconds (10 minutes).
    public static final Long PRE_SIGN_URL_EXPIRE = 60 * 10 * 1000L;

    // S3 clients keyed by storage-equipment id.
    // NOTE(review): the key type is String — confirm every caller passes the equipment id
    // as a String, otherwise Map.get silently returns null.
    @Resource
    private Map<String, AmazonS3> amazonS3ClientMap;
//    @Resource
//    private CacheDelService cacheDelService;
    @Resource
    private StFileMapper stFileMapper;



    // Default chunk size (bytes) for backend-driven uploads, from configuration.
    @Value("${upload.chunk-size}")
    private Long chunkSize;
    // LAN-facing proxy host/port substituted into upload URLs for intranet clients.
    @Value("${upload.lan-proxy-ip}")
    private String lanProxyIp;
    @Value("${upload.lan-proxy-port}")
    private String lanProxyPort;
    // WAN-facing proxy host/port substituted into upload URLs for external clients.
    @Value("${upload.wan-proxy-ip}")
    private String wanProxyIp;
    @Value("${upload.wan-proxy-port}")
    private String wanProxyPort;
    @Resource
    private WebSocketFrameHandler webSocketHandler;

    /**
     * Temporary folder (under the NAS base path) where uploaded chunk files are staged
     * before they are merged into the final file.
     */
    private String temporaryFolder = "/temporaryFolder";

    @Resource
    private StEquipmentMapper stEquipmentMapper;

    @Resource
    private LocalSplitFileService localSplitFileService;

//    @Resource
//    private StorageEquipmentService storageEquipmentService;

//    // 引入 (平台)事务管理器，Spring 事务策略的核心。
//    @Resource
//    private PlatformTransactionManager transactionManager;

    /*** -------------------------------------------------- 分片上传(前端请求OSS服务进行上传) --------------------------------------------------*/


    /**
     * Look up an upload task record by its source-file MD5 identifier.
     *
     * @param identifier MD5 of the source file
     * @return the first matching {@link StFile}, or {@code null} when none exists
     */
    @Override
    public StFile getByIdentifier(String identifier) {
        List<StFile> matches = stFileMapper.selectList(
                new QueryWrapper<StFile>().lambda().eq(StFile::getSourceFileMd5, identifier));
        return CollectionUtils.isEmpty(matches) ? null : matches.get(0);
    }


    /**
     * Initialize a split-upload task: create the storage-side upload session
     * (S3 multipart upload or a generated local upload id) and persist the task record.
     *
     * Guarded by {@code @Lock4j} on the identifier so two clients cannot initialize
     * the same file concurrently; the whole method runs in one transaction.
     *
     * @param param  upload parameters (file name, sizes, identifier, equipment id, ...)
     * @param userId id of the uploading user, stored as the record creator
     * @return task descriptor with {@code finished=false} and the target storage path
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    @Lock4j(keys = {"#param.identifier"})
    public SplitUploadDTO initTask(SplitUploadVO param, Long userId) {
        StFile task = new StFile();
        String fileName = param.getFileName();
        // Random UUID becomes the stored file name; the original name is kept separately.
        String fileName1 = IdUtil.randomUUID();
        String suffix = fileName.substring(fileName.lastIndexOf(".") + 1, fileName.length());
        String key = "";
        String bucketName = "";
        String uploadId = "";
        Date currentDate = new Date();
        // The equipment id determines the storage backend (object storage vs NAS) and path.
        StEquipment stEquipment = stEquipmentMapper.selectById(param.getEquipmentId());
//        //测试存储设备连接
//        storageEquipmentService.testConnect(stEquipment);
        if (StateConstants.COMMON_STORAGE_TYPE_OBJ.equals(stEquipment.getStorageType())) {
            // Object storage: key = <upload date>/<uuid>.<ext> inside the configured bucket.
            bucketName = stEquipment.getBucket();
            // NOTE(review): "YYYY" is the week-based year in SimpleDateFormat-style patterns;
            // "yyyy" is almost certainly intended — dates around New Year land in the wrong folder.
            key = StrUtil.format("{}/{}.{}", DateUtil.format(currentDate, "YYYY-MM-dd"), fileName1, suffix);
            // Derive Content-Type from the key's extension, defaulting to octet-stream.
            String contentType = MediaTypeFactory.getMediaType(key).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setContentType(contentType);
            // Start the S3 multipart upload; its uploadId ties all later part uploads together.
            InitiateMultipartUploadResult initiateMultipartUploadResult = null;
            try {
                initiateMultipartUploadResult = amazonS3ClientMap.get(param.getEquipmentId())
                        .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, key).withObjectMetadata(objectMetadata));
            } catch (AmazonServiceException e) {
                if (e.getStatusCode() == 404 && "NoSuchBucket".equals(e.getErrorCode())) {
                    log.error("存储桶不存在，连接失败。");
                    AssertUtils.isTrue(true, "存储桶不存在，连接失败。");
                } else {
                    log.error("连接出错：" + e.getMessage());
                    AssertUtils.isTrue(true, "连接出错，请核对相关配置");
                }
            }
            // NOTE(review): if AssertUtils.isTrue does not throw, the next line NPEs on a
            // null initiateMultipartUploadResult — confirm AssertUtils always aborts here.
            uploadId = initiateMultipartUploadResult.getUploadId();
            task.setUrl(getPath(stEquipment.getStorageAddress(), bucketName, key));
        } else if (StateConstants.COMMON_STORAGE_TYPE_LOCAL.equals(stEquipment.getStorageType())) {
            // NAS storage: the base path plays the role of the bucket; a random UUID
            // serves as the logical upload id (no storage-side session exists).
            bucketName = stEquipment.getBasePath();
            key = StrUtil.format("{}/{}.{}", DateUtil.format(currentDate, "YYYY-MM-dd"), fileName1, suffix);
            uploadId = IdUtil.randomUUID();
            task.setUrl(StrUtil.format("{}{}/{}", stEquipment.getStorageAddress(), bucketName, key));
        } else {
            throw new MyException(ResultCode.SYSTEM_ERROR, "服务器连接失败");
        }
        // Number of chunks = ceil(totalSize / chunkSize).
        int chunkNum = (int) Math.ceil(param.getTotalSize() * 1.0 / param.getChunkSize());
        task.setBucketName(bucketName)
                .setChunkNum(chunkNum)
                .setChunkSize(param.getChunkSize())
                .setSize(param.getTotalSize())
                .setFileMd5(param.getIdentifier())
                .setOriginalFilename(fileName)
                .setFilename(fileName1)
                .setExt(suffix)
                .setFilePath(bucketName + "/" + key)
                .setObjectKey(key)
                .setCreateUser(userId)
                .setUploadId(uploadId)
                .setBusiBatchNo(param.getBusiBatchNo())
                .setSourceFileMd5(param.getSourceFileMd5())
                .setEquipmentId(param.getEquipmentId());
        // Reject duplicates: only one task per source-file MD5 may exist.
        // NOTE(review): this check runs AFTER the S3 multipart upload was initiated, so a
        // duplicate leaves an orphaned multipart session behind — consider checking first.
        StFile byIdentifier = getByIdentifier(param.getIdentifier());
        if (!ObjectUtil.isNull(byIdentifier)) {
            throw new MyException(ResultCode.PARAM_ERROR, "本次上传有内容一样的文件，只能成功上传其中一个");
        }
        stFileMapper.insert(task);
//        //添加 synchronized 是为了防止同时上传相同文件
//        synchronized (this){
//            StFile byIdentifier = getByIdentifier(param.getIdentifier());
//            if (!ObjectUtil.isNull(byIdentifier)){
//                throw new MyException(ResultCode.PARAM_ERROR,"本次上传有内容一样的文件，只能成功上传其中一个");
//            }
//            //新发起一个事务
//            DefaultTransactionDefinition transactionDefinition = new DefaultTransactionDefinition();
//            transactionDefinition.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
//            //新发起一个事务
//            TransactionStatus transaction = transactionManager.getTransaction(transactionDefinition);
//            stFileMapper.insert(task);
//            transactionManager.commit(transaction);
//        }
        SplitUploadRecordDTO dto = new SplitUploadRecordDTO();
        BeanUtil.copyProperties(task, dto);
        return new SplitUploadDTO().setFinished(false).setTaskRecord(dto).setPath(getPath(stEquipment.getStorageAddress(), bucketName, key));
    }

    /**
     * Build the externally visible path for a stored object.
     *
     * @param endpoint  storage endpoint / address prefix; may be null or empty
     * @param bucket    bucket name (object storage) or base path (NAS)
     * @param objectKey object key relative to the bucket
     * @return "endpoint/bucket/objectKey", or "bucket/objectKey" when no endpoint is set
     */
    @Override
    public String getPath(String endpoint, String bucket, String objectKey) {
        // FIX: the original passed a null endpoint into StrUtil.format("{}{}/{}", ...),
        // which rendered the literal string "null" as a path prefix. Treat null the same
        // as empty and omit the endpoint entirely.
        if (endpoint == null || endpoint.isEmpty()) {
            return bucket + "/" + objectKey;
        }
        return endpoint + "/" + bucket + "/" + objectKey;
    }

    /**
     * Query the state of a split-upload task: whether it is finished and, if not,
     * which parts have already been uploaded (so the client can resume).
     *
     * FIXES vs. original:
     * - removed System.out timing prints (debug leftovers, one with a duplicated label);
     * - guarded channelSftp in the finally block (it is null when getConnect throws,
     *   which previously raised an NPE that masked the real connection error);
     * - corrected slf4j usage: the exception is now passed as the throwable argument
     *   instead of a "{}" placeholder, so the full stack trace is logged.
     *
     * @param identifier source-file MD5 identifying the task
     * @return task state, or null when no task exists (or its uploadId is stale and the
     *         record was deleted so the client re-initializes)
     */
    @Override
    public SplitUploadDTO getTaskInfo(String identifier) {
        StFile task = getByIdentifier(identifier);
        if (task == null) {
            return null;
        }
        SplitUploadRecordDTO dto = new SplitUploadRecordDTO();
        BeanUtil.copyProperties(task, dto);
        // The equipment id determines the storage backend (object storage vs NAS).
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        SplitUploadDTO result = new SplitUploadDTO().setFinished(true).setTaskRecord(dto)
                .setPath(getPath(stEquipment.getStorageAddress(), task.getBucketName(), task.getObjectKey()));
        // Record already flagged complete — trust it and skip the storage round-trip.
        if (StateConstants.COMMON_ONE.equals(task.getIsUploadOk())) {
            return result;
        }
        if (StateConstants.COMMON_STORAGE_TYPE_OBJ.equals(stEquipment.getStorageType())) {
            boolean doesObjectExist = false;
            try {
                doesObjectExist = amazonS3ClientMap.get(task.getEquipmentId()).doesObjectExist(task.getBucketName(), task.getObjectKey());
            } catch (AmazonServiceException e) {
                if (e.getStatusCode() == 404 && "NoSuchBucket".equals(e.getErrorCode())) {
                    log.error("存储桶不存在，连接失败。");
                    AssertUtils.isTrue(true, "存储桶不存在，连接失败。");
                } else {
                    log.error("连接出错：" + e.getMessage());
                    AssertUtils.isTrue(true, "连接出错，请核对相关配置");
                }
            }
            if (!doesObjectExist) {
                // Not finished: report the parts already uploaded so the client resumes.
                try {
                    ListPartsRequest listPartsRequest = new ListPartsRequest(task.getBucketName(), task.getObjectKey(), task.getUploadId());
                    PartListing partListing = amazonS3ClientMap.get(task.getEquipmentId()).listParts(listPartsRequest);
                    result.setFinished(false).getTaskRecord().setExitPartList(partListing.getParts());
                } catch (AmazonS3Exception e) {
                    // The uploadId is no longer valid; drop the record so the client re-initializes.
                    log.info(task.getUploadId() + "上传ID有问题，重新创建上传ID重新上传");
                    stFileMapper.deleteById(task.getId());
                    return null;
                }
            }
        } else if (StateConstants.COMMON_STORAGE_TYPE_LOCAL.equals(stEquipment.getStorageType())) {
            // NAS storage: probe the final file over SFTP to decide whether the merge happened.
            ChannelSftp channelSftp = null;
            try {
                channelSftp = FtpUtils.getConnect(stEquipment.getDomainName().split(":")[0], Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret());
                SftpATTRS lstat = channelSftp.lstat(task.getFilePath());
                // NOTE(review): JSch's lstat throws SftpException for a missing path rather
                // than returning null, so this branch may be unreachable — confirm.
                if (lstat == null) {
                    // Only the size of this list matters to the front end: it tells how many
                    // chunk files were already staged before the resume.
                    List<PartSummary> exitPartList = new ArrayList<>();
                    // Chunks are staged under <basePath>/temporaryFolder/<busiBatchNo>/<fileMd5>.
                    File file = new File(stEquipment.getBasePath() + temporaryFolder + File.separator
                            + result.getTaskRecord().getBusiBatchNo() + File.separator
                            + result.getTaskRecord().getFileMd5());
                    extracted(exitPartList, file.listFiles());
                    result.setFinished(false).getTaskRecord().setExitPartList(exitPartList);
                }
            } catch (SftpException e) {
                // Final file not present yet — the upload is unfinished.
                log.error("没有文件", e);
                result.setFinished(false);
            } catch (Exception e) {
                // NOTE(review): a connection failure leaves finished=true, which may falsely
                // signal completion to the caller — confirm this is intended.
                log.error("ftp连接失败", e);
            } finally {
                if (channelSftp != null) {
                    channelSftp.disconnect();
                }
            }
        } else {
            throw new MyException(ResultCode.SYSTEM_ERROR, "服务器连接失败");
        }
        return result;
    }

    /**
     * Convert staged chunk files into {@link PartSummary} entries so the front end can
     * see how many parts were uploaded before a resume. Chunk files are named
     * "&lt;partNumber&gt;-...", so the part number is parsed from the text before the
     * first '-'. A null or empty array (no chunks yet) leaves the list untouched.
     */
    private void extracted(List<PartSummary> exitPartList, File[] files) {
        if (files == null) {
            return;
        }
        for (File chunkFile : files) {
            PartSummary part = new PartSummary();
            part.setPartNumber(Integer.valueOf(chunkFile.getName().split("-")[0]));
            part.setSize(chunkFile.length());
            exitPartList.add(part);
        }
    }

    /**
     * "Instant upload" (秒传): when the same content already exists in object storage,
     * server-side-copy the existing object to a fresh key instead of re-uploading the
     * bytes, and persist a new file record pointing at the copy.
     *
     * @param param  upload parameters (identifier, file name, sizes, batch number)
     * @param userId id of the uploading user, stored as the record creator
     * @return always {@code null} — NOTE(review): callers apparently ignore the result;
     *         confirm whether a populated SplitUploadDTO was intended here.
     */
    @Override
    public SplitUploadDTO secondUpload(SplitUploadVO param, Long userId) {
        StFile task = getByIdentifier(param.getIdentifier());
        // FIX: the null check must run before task is used — the original called
        // task.getEquipmentId() first, which threw an NPE whenever no task existed.
        if (task == null) {
            return null;
        }
        // The equipment id determines the storage backend and target bucket.
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        boolean doesObjectExist = amazonS3ClientMap.get(task.getEquipmentId()).doesObjectExist(task.getBucketName(), task.getObjectKey());
        if (doesObjectExist) {
            Date currentDate = new Date();
            String bucketName = stEquipment.getBucket();
            String fileName = param.getFileName();
            String suffix = fileName.substring(fileName.lastIndexOf(".") + 1, fileName.length());
            String fileName1 = IdUtil.randomUUID();
            // NOTE(review): "YYYY" is week-based year in SimpleDateFormat-style patterns;
            // "yyyy" is almost certainly intended (wrong folder around New Year).
            String key = StrUtil.format("{}/{}.{}", DateUtil.format(currentDate, "YYYY-MM-dd"), fileName1, suffix);
            // Server-side copy: no bytes travel through this service.
            amazonS3ClientMap.get(task.getEquipmentId()).copyObject(task.getBucketName(), task.getObjectKey(), bucketName, key);
            StFile task1 = new StFile();
            int chunkNum = (int) Math.ceil(param.getTotalSize() * 1.0 / param.getChunkSize());
            task1.setBucketName(bucketName)
                    .setChunkNum(chunkNum)
                    .setChunkSize(param.getChunkSize())
                    .setSize(param.getTotalSize())
                    .setFileMd5(param.getIdentifier())
                    .setOriginalFilename(fileName)
                    .setFilename(fileName1)
                    .setExt(suffix)
                    .setFilePath(getPath(stEquipment.getStorageAddress(), bucketName, key))
                    .setObjectKey(key)
                    .setCreateUser(userId)
                    .setBusiBatchNo(param.getBusiBatchNo());
            stFileMapper.insert(task1);
        }
        return null;
    }

    /**
     * Generate the URL a client should use to upload one chunk.
     *
     * For object storage this is a presigned S3 PUT URL with its host:port rewritten to
     * the LAN or WAN proxy; for NAS storage it is a URL to this service's own chunk
     * upload endpoint.
     *
     * FIXES vs. original: removed the dead {@code params != null} check, replaced
     * StringBuffer with StringBuilder, and replaced the NPE on a missing presigned URL
     * with an explicit assertion message.
     *
     * @param identifier source-file MD5 identifying the upload task
     * @param partNumber 1-based chunk index
     * @param type       "1" selects the LAN proxy; anything else the WAN proxy
     * @param isOpen     whether to route through the open-api path segment (NAS mode)
     * @return the chunk upload URL, or null for an unknown storage type
     */
    @Override
    public String genPreSignUploadUrl(String identifier, Integer partNumber, String type, Boolean isOpen) {
        AssertUtils.isNull(type, "type不能为空");
        StFile task = getByIdentifier(identifier);
        AssertUtils.isNull(task, "分片任务不存在");
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        // Choose the proxy address the client can actually reach.
        final String newIP;
        final String newPort;
        if ("1".equals(type)) {
            newIP = lanProxyIp;
            newPort = lanProxyPort;
        } else {
            newIP = wanProxyIp;
            newPort = wanProxyPort;
        }
        if (StateConstants.COMMON_STORAGE_TYPE_OBJ.equals(stEquipment.getStorageType())) {
            // partNumber + uploadId are mandatory query parameters of a multipart part PUT.
            Map<String, String> params = new HashMap<>();
            params.put("partNumber", partNumber.toString());
            params.put("uploadId", task.getUploadId());
            Date expireDate = DateUtil.offsetMillisecond(new Date(), PRE_SIGN_URL_EXPIRE.intValue());
            GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(task.getBucketName(), task.getObjectKey())
                    .withExpiration(expireDate).withMethod(HttpMethod.PUT);
            params.forEach(request::addRequestParameter);
            URL preSignedUrl = null;
            try {
                preSignedUrl = amazonS3ClientMap.get(task.getEquipmentId()).generatePresignedUrl(request);
            } catch (AmazonServiceException e) {
                if (e.getStatusCode() == 404 && "NoSuchBucket".equals(e.getErrorCode())) {
                    log.error("存储桶不存在，连接失败。");
                    AssertUtils.isTrue(true, "存储桶不存在，连接失败。");
                } else {
                    log.error("连接出错：" + e.getMessage());
                    AssertUtils.isTrue(true, "连接出错，请核对相关配置");
                }
            }
            // FIX: fail with a clear message instead of an NPE when no URL was produced.
            AssertUtils.isNull(preSignedUrl, "生成预签名URL失败，请核对存储配置");
            // NOTE(review): URL.getPort() returns -1 when the endpoint carries no explicit
            // port, in which case this replace will not match — confirm endpoints always
            // include a port.
            return preSignedUrl.toString().replace(preSignedUrl.getHost() + ":" + preSignedUrl.getPort(), newIP + ":" + newPort);
        } else if (StateConstants.COMMON_STORAGE_TYPE_LOCAL.equals(stEquipment.getStorageType())) {
            // NAS mode: the chunk is uploaded to this service's own backend endpoint.
            StringBuilder nasUrl = new StringBuilder();
            nasUrl.append("http://").append(newIP).append(":").append(newPort)
                    .append("/web-api/storage").append(isOpen ? "/api" : "")
                    .append("/storage/oss/splitUpload/uploadSplit?");
            nasUrl.append("partNumber=").append(partNumber);
            nasUrl.append("&identifier=").append(task.getFileMd5());
            nasUrl.append("&fileName=").append(task.getFilename()).append(".").append(task.getExt());
            nasUrl.append("&equipmentId=").append(task.getEquipmentId());
            nasUrl.append("&busiBatchNo=").append(task.getBusiBatchNo());
            return nasUrl.toString();
        }
        return null;
    }

    /**
     * Merge all uploaded chunks of a task into the final file and flag the record done.
     *
     * Object storage: verifies every part is present, then completes the S3 multipart
     * upload. NAS storage: delegates to {@link LocalSplitFileService#merge}. In both
     * branches an optional image "deviation correction" hook runs, and finally the
     * record's is_upload_ok flag is set.
     *
     * FIX vs. original: the "task not found" error message was truncated
     * ("分片任务不存" → "分片任务不存在").
     *
     * @param identifier source-file MD5 identifying the task
     * @param isFlat     whether deviation correction should run for image files
     * @return the merged task record; an empty StFile when the object already existed
     * @throws RuntimeException when the task is missing or parts are incomplete
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public StFile merge(String identifier, Boolean isFlat) {
        long start = System.currentTimeMillis();
        AssertUtils.isNull(isFlat, "isFlat：是否偏离矫正判断条件不能为空");
        StFile task = getByIdentifier(identifier);

        if (task == null) {
            throw new RuntimeException("分片任务不存在");
        }
        // The equipment id determines the storage backend (object storage vs NAS).
        StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, task.getEquipmentId()));
        if (StateConstants.COMMON_STORAGE_TYPE_OBJ.equals(stEquipment.getStorageType())) {
            // If the final object already exists there is nothing to merge.
            boolean doesObjectExist = false;
            try {
                doesObjectExist = amazonS3ClientMap.get(task.getEquipmentId()).doesObjectExist(task.getBucketName(), task.getObjectKey());
            } catch (AmazonServiceException e) {
                if (e.getStatusCode() == 404 && "NoSuchBucket".equals(e.getErrorCode())) {
                    log.error("存储桶不存在，连接失败。");
                    AssertUtils.isTrue(true, "存储桶不存在，连接失败。");
                } else {
                    log.error("连接出错：" + e.getMessage());
                    AssertUtils.isTrue(true, "连接出错，请核对相关配置");
                }
            }
            if (doesObjectExist) {
                return new StFile();
            }
            ListPartsRequest listPartsRequest = new ListPartsRequest(task.getBucketName(), task.getObjectKey(), task.getUploadId());
            PartListing partListing = amazonS3ClientMap.get(task.getEquipmentId()).listParts(listPartsRequest);
            List<PartSummary> parts = partListing.getParts();
            if (!task.getChunkNum().equals(parts.size())) {
                // Uploaded part count disagrees with the record — refuse to merge.
                throw new RuntimeException("分片缺失，请重新上传");
            }
            // Complete the multipart upload with the ETag of every part.
            CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                    .withUploadId(task.getUploadId())
                    .withKey(task.getObjectKey())
                    .withBucketName(task.getBucketName())
                    .withPartETags(parts.stream().map(partSummary -> new PartETag(partSummary.getPartNumber(), partSummary.getETag())).collect(Collectors.toList()));
            amazonS3ClientMap.get(task.getEquipmentId()).completeMultipartUpload(completeMultipartUploadRequest);
            long end1 = System.currentTimeMillis();
            log.info("（minio）合并文件的操作耗时：{}(毫秒)", end1 - start);
            addBusiBatchNoRedis(task.getBusiBatchNo());
            if (FileCheckUtils.isImage(Collections.singletonList(task.getUrl())) && isFlat) {
                log.info("进行了偏离矫正");
            }
            cache(task);
        } else if (StateConstants.COMMON_STORAGE_TYPE_LOCAL.equals(stEquipment.getStorageType())) {
            // NAS storage: merge staged chunk files on the NAS via the local service.
            SplitUploadVO splitUploadVO = new SplitUploadVO();
            BeanUtils.copyProperties(task, splitUploadVO);
            splitUploadVO.setIdentifier(task.getFileMd5());
            splitUploadVO.setFileName(task.getFilename() + "." + task.getExt());
            localSplitFileService.merge(splitUploadVO);
            long end1 = System.currentTimeMillis();
            log.info("（nas存储）合并文件的操作耗时：{}(毫秒)", end1 - start);
            if (FileCheckUtils.isImage(task.getFilePath(), stEquipment.getDomainName().split(":")[0],
                    Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret()) && isFlat) {

                log.info("进行了偏离矫正");
            }
            cacheLocal(task, stEquipment);
        }
        stFileMapper.update(null, new UpdateWrapper<StFile>()
                .set("is_upload_ok", StateConstants.COMMON_ONE)
                .eq("id", task.getId()));
        long end = System.currentTimeMillis();
        log.info("合并文件总耗时（合并+合并后操作）：{}(毫秒)", end - start);
        return task;
    }

    /**
     * Post-merge cache hook for NAS (SFTP) storage.
     * Currently a no-op: the CacheDelService call below is disabled, so this method
     * has no runtime effect. Kept as an extension point for re-enabling the cache.
     *
     * @param task        the completed upload record
     * @param stEquipment the NAS storage device the file was written to
     */
    public void cacheLocal(StFile task, StEquipment stEquipment) {
//        cacheDelService.isEmptyFileByTempDir(task.getFilePath(), task.getId(), task.getExt(), stEquipment.getDomainName().split(":")[0],
//                Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret());
    }

    /**
     * Post-merge cache hook for object storage.
     * Currently a no-op: the CacheDelService call below is disabled, so this method
     * has no runtime effect. Kept as an extension point for re-enabling the cache.
     *
     * @param task the completed upload record
     */
    public void cache(StFile task) {
//        cacheDelService.isEmptyFileByTempDir(task.getUrl(), task.getId(), task.getExt());
    }

    /** -------------------------------------------------- 私有方法 --------------------------------------------------*/


    /*** -------------------------------------------------- 分片上传(后端请求OSS服务进行上传) --------------------------------------------------*/

    /**
     * Backend-driven split upload: the server itself chunks and uploads a byte array
     * to object storage (S3 multipart) or NAS, reusing an existing task record for the
     * same MD5 when one exists (resume / instant return when already complete).
     *
     * FIXES vs. original:
     * - the existing-task record is null-checked BEFORE getIsUploadOk() is called
     *   (the original dereferenced a possibly-null record first → NPE on new files);
     * - e.printStackTrace() replaced with parameterized slf4j logging;
     * - local variable renamed so it no longer shadows the StFileDTO class name.
     *
     * @param fileByte      file content
     * @param userId        uploading user id, stored as the record creator
     * @param stEquipmentId target storage equipment id
     * @param fileName      original file name (extension is taken from it)
     * @return DTO of the stored file record, or null when the upload failed
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public StFileDTO useS3Upload(byte[] fileByte, Long userId, Long stEquipmentId, String fileName) {
        try {
            // The equipment id determines the storage backend (object storage vs NAS).
            StEquipment stEquipment = stEquipmentMapper.selectOne(new LambdaQueryWrapper<StEquipment>().eq(StEquipment::getId, stEquipmentId));
            StFile stFile = new StFile();
            String suffix = fileName.substring(fileName.lastIndexOf(".") + 1, fileName.length());
            String uploadId = null;
            String md5 = Md5Utils.calculateMD5(fileByte);
            ObjectMetadata objectMetadata = new ObjectMetadata();
            String fileName1 = IdUtil.randomUUID();
            Date currentDate = new Date();
            // NOTE(review): "YYYY" is week-based year in SimpleDateFormat-style patterns;
            // "yyyy" is almost certainly intended (wrong folder around New Year).
            String key = StrUtil.format("{}/{}.{}", DateUtil.format(currentDate, "YYYY-MM-dd"), fileName1, suffix);
            String contentType = MediaTypeFactory.getMediaType(key).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
            objectMetadata.setContentType(contentType);
            int fileSize = fileByte.length;
            if (fileSize > 0) {
                // Check whether a split-upload task already exists for this content.
                stFile = getByIdentifier(md5);
                if (!ObjectUtil.isNull(stFile)) {
                    // Already fully uploaded: return the existing record immediately.
                    if (StateConstants.COMMON_ONE.equals(stFile.getIsUploadOk())) {
                        StFileDTO stFileDTO = new StFileDTO();
                        BeanUtils.copyProperties(stFile, stFileDTO);
                        return stFileDTO;
                    }
                    // Resume the existing upload session.
                    uploadId = stFile.getUploadId();
                    key = stFile.getObjectKey();
                } else {
                    if (StateConstants.COMMON_STORAGE_TYPE_OBJ.equals(stEquipment.getStorageType())) {
                        // Initiate a multipart upload; bucket + key + uploadId identify it.
                        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(stEquipment.getBucket(), key);
                        initRequest.setObjectMetadata(objectMetadata);
                        InitiateMultipartUploadResult initResult = amazonS3ClientMap.get(stEquipmentId).initiateMultipartUpload(initRequest);
                        uploadId = initResult.getUploadId();
                    } else if (StateConstants.COMMON_STORAGE_TYPE_LOCAL.equals(stEquipment.getStorageType())) {
                        // NAS has no storage-side session: a random UUID is the upload id.
                        uploadId = IdUtil.randomUUID();
                    }
                    stFile = saveFile(fileName, fileName1, md5, key, fileSize, chunkSize, uploadId, userId,
                            UUID.randomUUID().toString(), StateConstants.ZERO, suffix, stEquipment);
                }
                if (StateConstants.COMMON_STORAGE_TYPE_OBJ.equals(stEquipment.getStorageType())) {
                    // Object storage / OSS upload.
                    s3UploadBack(fileByte, objectMetadata, key, stEquipment.getBucket(), uploadId, stEquipmentId, stFile);
                } else if (StateConstants.COMMON_STORAGE_TYPE_LOCAL.equals(stEquipment.getStorageType())) {
                    // NAS upload.
                    nasUploadBack(fileByte, stEquipment, stFile);
                }
                stFileMapper.update(null, new UpdateWrapper<StFile>()
                        .set("is_upload_ok", StateConstants.COMMON_ONE)
                        .eq("upload_id", uploadId));
            }
            StFileDTO resultDto = new StFileDTO();
            BeanUtils.copyProperties(stFile, resultDto);
            return resultDto;
        } catch (Exception e) {
            // NOTE(review): failures are swallowed and null is returned — confirm callers
            // handle a null result; rethrowing would also trigger transaction rollback.
            log.error("S3后端分片上传失败", e);
        }
        return null;
    }

    /**
     * Cancels an in-progress multipart upload by aborting it on the storage
     * device, which discards every chunk already stored under the uploadId.
     *
     * @param md5           MD5 identifying the file record
     * @param stEquipmentId id of the storage device whose S3 client holds the chunks
     */
    @Override
    public void cancelFileUpload(String md5, String stEquipmentId) {
        AssertUtils.isNull(md5,"参数错误,md5为空");
        AssertUtils.isNull(stEquipmentId,"参数错误,stEquipmentId为空");
        StFile record = getByIdentifier(md5);
        AssertUtils.isNull(record,"参数错误");
        // The abort request needs all three coordinates of the upload.
        AssertUtils.isNull(record.getBucketName(),"参数错误,stFile.getBucketName()为空");
        AssertUtils.isNull(record.getObjectKey(),"参数错误，stFile.getObjectKey()为空");
        AssertUtils.isNull(record.getUploadId(),"参数错误,stFile.getUploadId()为空");
        AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(
                record.getBucketName(), record.getObjectKey(), record.getUploadId());
        amazonS3ClientMap.get(stEquipmentId).abortMultipartUpload(abortRequest);
        log.info("文件上传已取消,已经上传的分片已被删除");
    }

    /**
     * -------------------------------------------------- 私有方法 --------------------------------------------------
     */


    /**
     * Placeholder for recording the business batch number in Redis.
     * <p>
     * NOTE(review): the original implementation only formatted the current
     * time with a {@code SimpleDateFormat} and discarded the result — it had
     * no observable effect. That dead computation has been removed; the Redis
     * write the method name promises was never implemented. TODO: implement
     * the Redis bookkeeping or remove this method and its call sites.
     *
     * @param busiBatchNo business batch number that was meant to be recorded
     */
    private void addBusiBatchNoRedis(String busiBatchNo) {
        // Intentionally empty — see NOTE above.
    }


    /**
     * Persists a new StFile record describing an upload.
     *
     * @param newFileName original (display) file name
     * @param fileName1   stored file name (without extension)
     * @param md5         MD5 of the source file
     * @param key         object key / relative path on the storage device
     * @param size        total file size in bytes
     * @param i           chunk size in bytes (must be &gt; 0)
     * @param uploadId    multipart upload id (S3) or generated UUID (NAS)
     * @param userId      id of the uploading user
     * @param busiBatchNo business batch number grouping the chunks
     * @param isUploadOk  upload-finished flag (0 = in progress, 1 = done)
     * @param suffix      file extension
     * @param stEquipment storage device configuration
     * @return the inserted entity (id populated by MyBatis-Plus on insert)
     */
    private StFile saveFile(String newFileName, String fileName1, String md5, String key, long size, Long i, String uploadId, Long userId, String busiBatchNo, int isUploadOk, String suffix, StEquipment stEquipment) {
        StFile task = new StFile();
        task.setBucketName(stEquipment.getBucket())
                // Bug fix: ceiling division — a trailing partial chunk still
                // counts as a chunk. The original used size / i, which
                // under-counted whenever size was not an exact multiple of i.
                .setChunkNum(Math.toIntExact((size + i - 1) / i))
                .setChunkSize(Long.valueOf(i))
                .setSize(size)
                .setFileMd5(md5)
                .setOriginalFilename(newFileName)
                .setFilename(fileName1)
                .setExt(suffix)
                .setFilePath(stEquipment.getBucket() + "/" + key)
                .setObjectKey(key)
                .setCreateUser(userId)
                .setUrl(getPath(stEquipment.getStorageAddress(), stEquipment.getBucket(), key))
                .setEquipmentId(stEquipment.getId())
                .setUploadId(uploadId)
                // source MD5 mirrors the file MD5 at creation time
                .setSourceFileMd5(md5)
                .setBusiBatchNo(busiBatchNo)
                .setIsUploadOk(isUploadOk);
        stFileMapper.insert(task);
        return task;
    }


    /**
     * Resumable S3/OSS multipart upload of one file payload.
     * <p>
     * Resumes from parts already present under {@code uploadId}, uploads the
     * remaining parts sequentially, and completes the multipart upload once
     * every part is on the server.
     *
     * @param fileByte       raw file content to upload
     * @param objectMetadata metadata for the object (currently unused here —
     *                       kept for interface compatibility; TODO confirm it
     *                       should be applied to the upload)
     * @param key            object key
     * @param bucket         target bucket
     * @param uploadId       multipart upload id obtained at initiation
     * @param stEquipmentId  storage device id used to look up the S3 client
     * @param stFile         file record, used for progress reporting
     * @return the uploadId that was processed
     */
    private String s3UploadBack(byte[] fileByte, ObjectMetadata objectMetadata, String key, String bucket, String uploadId, Long stEquipmentId, StFile stFile) {
        // Plain locals: the original kept these in ThreadLocals, but they
        // never cross threads within this method — ThreadLocal only added
        // overhead and a leak risk.
        File file = convert(fileByte);
        if (file == null) {
            // Fail fast instead of NPE-ing on file.length() below.
            throw new IllegalStateException("临时文件创建失败");
        }
        List<PartETag> partETags = new ArrayList<>();
        long filePosition = 0L;
        long contentLength = file.length();
        long partSize = chunkSize;
        int partNumber = 1;
        // Resume: pick up any parts already uploaded under this uploadId.
        List<PartETag> existingPartETags = getExistingPartETags(bucket, key, uploadId, stEquipmentId);
        if (existingPartETags != null && !existingPartETags.isEmpty()) {
            partETags = new ArrayList<>(existingPartETags);
            partNumber = existingPartETags.size() + 1;
            // assumes every previously uploaded part was a full chunkSize
            // part — TODO confirm a resumed last part cannot be short
            filePosition = partSize * existingPartETags.size();
        }
        for (; filePosition < contentLength; partNumber++) {
            if (isPartUploaded(partETags, partNumber)) {
                continue; // part already on the server — skip
            }
            // Last part may be shorter than the nominal chunk size.
            long remainingSize = contentLength - filePosition;
            partSize = Math.min(partSize, remainingSize);
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(bucket)
                    .withKey(key)
                    .withUploadId(uploadId)
                    .withPartNumber(partNumber)
                    .withFileOffset(filePosition)
                    .withFile(file)
                    .withPartSize(partSize)
                    .withGeneralProgressListener(progressListener(filePosition, contentLength, stFile));
            // Mark the final part so the SDK can finalize (required for
            // encrypted uploads).
            if (filePosition + partSize == contentLength) {
                uploadRequest.setLastPart(true);
            }
            UploadPartResult uploadResult = amazonS3ClientMap.get(stEquipmentId).uploadPart(uploadRequest);
            partETags.add(uploadResult.getPartETag());
            filePosition += partSize;
        }
        // Only complete once the server-side part listing matches what we
        // expect to have uploaded.
        if (isMultipartUploadComplete(bucket, key, uploadId, partETags, stEquipmentId)) {
            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                    bucket, key, uploadId, partETags);
            amazonS3ClientMap.get(stEquipmentId).completeMultipartUpload(completeRequest);
            log.info("完成合并分片");
        }
        // Remove the temporary staging file.
        file.delete();
        return uploadId;
    }

    /**
     * Writes the given bytes to a fresh temporary file.
     * <p>
     * Callers are responsible for deleting the returned file when done.
     *
     * @param fileBytes content to stage on disk
     * @return the temporary file containing {@code fileBytes}
     * @throws RuntimeException if the temporary file cannot be created or written
     */
    private File convert(byte[] fileBytes) {
        try {
            File tempFile = File.createTempFile("temp", null);
            // Bug fix: the original never closed the FileOutputStream,
            // leaking a file handle per call.
            try (OutputStream outputStream = new FileOutputStream(tempFile)) {
                outputStream.write(fileBytes);
            }
            return tempFile;
        } catch (IOException e) {
            // The original swallowed the error and returned null, which
            // guaranteed an NPE at the call site; fail fast with the cause.
            throw new RuntimeException("临时文件写入失败", e);
        }
    }

    /**
     * Lists the parts already uploaded to S3 under the given multipart
     * uploadId, following listing pagination.
     *
     * @param bucketName    target bucket
     * @param key           object key
     * @param uploadId      multipart upload id
     * @param stEquipmentId storage device id used to look up the S3 client
     * @return the PartETags collected so far (possibly partial if listing failed)
     */
    private List<PartETag> getExistingPartETags(String bucketName, String key, String uploadId, Long stEquipmentId) {
        List<PartETag> existingPartETags = new ArrayList<>();

        try {
            ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, key, uploadId);
            PartListing partListing;
            do {
                partListing = amazonS3ClientMap.get(stEquipmentId).listParts(listPartsRequest);
                for (PartSummary partSummary : partListing.getParts()) {
                    existingPartETags.add(new PartETag(partSummary.getPartNumber(), partSummary.getETag()));
                }
                // Advance to the next page of the listing.
                listPartsRequest.setPartNumberMarker(partListing.getNextPartNumberMarker());
            } while (partListing.isTruncated());
        } catch (AmazonS3Exception e) {
            // Listing failure (e.g. uploadId unknown) — log and fall back to
            // whatever was collected; caller treats an empty list as "no
            // resumable parts". (Replaces the original printStackTrace.)
            log.error("获取已上传分片记录失败,uploadId={}", uploadId, e);
        }

        return existingPartETags;
    }

    /**
     * Checks whether a part with the given part number is already present in
     * the supplied ETag list.
     *
     * @param partETags  ETags of the parts known to be uploaded
     * @param partNumber 1-based part number to look for
     * @return true if the part is already uploaded
     */
    private boolean isPartUploaded(List<PartETag> partETags, int partNumber) {
        return partETags.stream().anyMatch(tag -> tag.getPartNumber() == partNumber);
    }

    /**
     * Checks whether every expected part is present on the server for the
     * given multipart upload.
     * <p>
     * NOTE(review): this only compares part COUNTS, not part numbers/ETags
     * (the original had the ETag comparison commented out) — confirm a
     * count-only check is acceptable.
     *
     * @param bucketName    target bucket
     * @param key           object key
     * @param uploadId      multipart upload id
     * @param partETags     parts we expect to have uploaded
     * @param stEquipmentId storage device id used to look up the S3 client
     * @return true when the server-side part count matches the expected count
     */
    private boolean isMultipartUploadComplete(String bucketName, String key, String uploadId, List<PartETag> partETags, Long stEquipmentId) {
        try {
            ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, key, uploadId);
            int remotePartCount = 0;
            PartListing partListing;
            do {
                partListing = amazonS3ClientMap.get(stEquipmentId).listParts(listPartsRequest);
                remotePartCount += partListing.getParts().size();
                listPartsRequest.setPartNumberMarker(partListing.getNextPartNumberMarker());
            } while (partListing.isTruncated());
            // Bug fix: the original compared each listing PAGE's size against
            // the total expected count, which spuriously failed whenever the
            // listing was paginated. Compare the accumulated total instead.
            return remotePartCount == partETags.size();
        } catch (AmazonS3Exception e) {
            // Listing failure — treat as incomplete. (Replaces printStackTrace.)
            log.error("检查分片上传状态失败,uploadId={}", uploadId, e);
            return false;
        }
    }

    /**
     * Builds a progress listener that tracks transferred bytes and pushes the
     * percentage to the uploading user over WebSocket.
     *
     * @param uploadedBytes bytes already transferred before this request started
     * @param totalBytes    total size of the file being uploaded
     * @param stFile        file record (supplies user id and message payload)
     * @return listener suitable for an AWS SDK transfer request
     */
    private ProgressListener progressListener(long uploadedBytes, long totalBytes, StFile stFile) {
        // Single-element array so the lambda can mutate the running total.
        final long[] transferred = {uploadedBytes};
        return progressEvent -> {
            ProgressEventType eventType = progressEvent.getEventType();
            if (eventType == ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT) {
                transferred[0] += progressEvent.getBytesTransferred();
                int progressPercentage = (int) ((transferred[0] * 100) / totalBytes);
                log.info("已上传:{}",progressPercentage);
                // Push the current percentage to the user's WebSocket session.
                webSocketHandler.send(stFile.getCreateUser(), createProgressMessage(progressPercentage, stFile));
            } else if (eventType == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
                log.info("上传完成");
                webSocketHandler.send(stFile.getCreateUser(), String.valueOf(100));
            } else if (eventType == ProgressEventType.TRANSFER_STARTED_EVENT) {
                log.info("上传开始");
            }
        };
    }

    /**
     * Serializes an upload-progress payload for the WebSocket channel.
     *
     * @param progressPercentage current progress, 0-100
     * @param stFile             file record supplying md5, name and id
     * @return the progress message as a JSON string
     */
    private String createProgressMessage(int progressPercentage, StFile stFile) {
        ProgressMessageDTO dto = new ProgressMessageDTO();
        dto.setProgress(progressPercentage);
        dto.setMd5(stFile.getSourceFileMd5());
        dto.setNewFileName(stFile.getOriginalFilename());
        dto.setFileId(stFile.getId());
        return JsonUtils.toJSONString(dto);
    }

    /**
     * Uploads the given stream as a single (non-multipart) object to the
     * device associated with the task.
     * <p>
     * Two obsolete commented-out implementations (per-request client creation
     * and nacos-config upload) were removed as dead code.
     *
     * @param task        file record; supplies equipment id, bucket, object key and extension
     * @param inputStream content to upload
     */
    private void uploadOss(StFile task, ByteArrayInputStream inputStream) {
        // A throwaway name carrying the real extension, used ONLY so
        // MediaTypeFactory can resolve the Content-Type from the extension.
        String key = StrUtil.format("{}/{}.{}", DateUtil.format(new Date(), "YYYY-MM-dd"), IdUtil.randomUUID(), key2Ext(task));
        ObjectMetadata objectMetadata = new ObjectMetadata();
        String contentType = MediaTypeFactory.getMediaType(key).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
        objectMetadata.setContentType(contentType);
        // NOTE(review): the object is stored under task.getObjectKey(), NOT
        // the generated key above — confirm that is intended.
        amazonS3ClientMap.get(task.getEquipmentId()).putObject(task.getBucketName(), task.getObjectKey(), inputStream, objectMetadata);
    }

    /** Returns the task's file extension used for Content-Type detection. */
    private String key2Ext(StFile task) {
        return task.getExt();
    }

    /**
     * Opens an SFTP input stream for the given remote path.
     * <p>
     * NOTE(review): the channel is disconnected in {@code finally} even
     * though the returned stream is still backed by it — with JSch this
     * likely invalidates the stream before the caller can read it. Behavior
     * preserved here; confirm callers actually manage to consume the stream.
     *
     * @param urlStr   remote file path
     * @param host     SFTP host
     * @param port     SFTP port
     * @param username SFTP user
     * @param password SFTP password
     * @return the remote file's input stream, or null when the connection or read failed
     */
    public InputStream getLocalInputStream(String urlStr, String host, Integer port, String username, String password) {
        ChannelSftp connect = null;
        InputStream inputStream = null;
        try {
            connect = FtpUtils.getConnect(host, port, username, password);
            inputStream = connect.get(urlStr);
            return inputStream;
        } catch (Exception e) {
            // The original swallowed this silently; at least record it.
            log.error("获取SFTP文件流失败,{}", urlStr, e);
        } finally {
            // Bug fix: when getConnect() threw, connect was null and the
            // original finally block itself threw an NPE, masking the cause.
            if (connect != null) {
                connect.disconnect();
            }
        }
        return inputStream;
    }

    /**
     * Resumable chunked upload to NAS storage over SFTP.
     * <p>
     * Resumes from chunks already present on the server, uploads the
     * remaining chunks, and triggers a server-side merge once complete.
     *
     * @param fileByte    raw file content to upload
     * @param stEquipment storage device configuration (host, credentials, base path)
     * @param stFile      file record (batch number, md5, file name)
     */
    private void nasUploadBack(byte[] fileByte, StEquipment stEquipment, StFile stFile) {
        // Plain locals: the original kept these in ThreadLocals, but they
        // never cross threads within this method.
        File file = convert(fileByte);
        if (file == null) {
            // Fail fast instead of NPE-ing on file.length() below.
            throw new IllegalStateException("临时文件创建失败");
        }
        List<PartSummary> uploadedParts = new ArrayList<>();
        long filePosition = 0L;
        long contentLength = file.length();
        long partSize = chunkSize;
        int partNumber = 1;
        // Resume: count chunks already staged on the server for this file.
        List<PartSummary> existingParts = getExistingPartETagsByFTP(stFile, stEquipment);
        if (existingParts != null && !existingParts.isEmpty()) {
            uploadedParts = new ArrayList<>(existingParts);
            partNumber = existingParts.size() + 1;
            // assumes previously uploaded chunks were all full chunkSize
            // chunks — TODO confirm
            filePosition = partSize * existingParts.size();
        }
        for (; filePosition < contentLength; partNumber++) {
            if (isPartUploadedByFTP(uploadedParts, partNumber)) {
                continue; // chunk already on the server — skip
            }
            // Last chunk may be shorter than the nominal chunk size.
            long remainingSize = contentLength - filePosition;
            partSize = Math.min(partSize, remainingSize);
            // NOTE(review): the Result returned here is ignored — a failed
            // chunk upload is neither retried nor reported; confirm intended.
            uploadSplitByFTP(file, stFile, partNumber, stEquipment, partSize, chunkSize);

            PartSummary partSummary = new PartSummary();
            partSummary.setPartNumber(partNumber);
            partSummary.setSize(partSize);
            uploadedParts.add(partSummary);
            filePosition += partSize;
        }
        if (isMultipartUploadCompleteByFTP(stFile, stEquipment)) {
            // (The original also built a SplitUploadVO here that was never
            // used — removed as dead code.)
            merge(stFile.getBusiBatchNo(), stFile.getSourceFileMd5(), stFile.getFilename() + "." + stFile.getExt(), stEquipment);
            log.info("完成合并分片");
        }
        // Remove the temporary staging file.
        file.delete();
    }

    /**
     * Uploads one chunk of the file to the NAS over SFTP, skipping it when the
     * chunk file already exists on the server.
     * <p>
     * Chunk files are named {@code "<partNumber>-<name>.<ext>"} and staged
     * under {@code basePath/temporaryFolder/busiBatchNo/sourceFileMd5/}.
     *
     * @param file        staged local copy of the whole file
     * @param stFile      file record (batch number, md5, name, extension)
     * @param partNumber  1-based chunk number
     * @param stEquipment storage device configuration
     * @param partSize    size of THIS chunk in bytes
     * @param chunkSize   nominal chunk size (stride between chunk offsets)
     * @return success, or an error Result when the connection or upload fails
     */
    private Result uploadSplitByFTP(File file, StFile stFile, int partNumber, StEquipment stEquipment, long partSize, Long chunkSize) {
        ChannelSftp channelSftp = null;
        try {
            channelSftp = FtpUtils.getConnect(stEquipment.getDomainName().split(":")[0], Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret());
        } catch (Exception e) {
            // Connection failure is reported via the null check below.
            log.error("存储设备连接失败", e);
        }

        if (null == channelSftp) {
            return Result.error("无对应存储设备连接", ResultCode.PARAM_ERROR);
        }
        String url = stEquipment.getBasePath() + File.separator + temporaryFolder + File.separator
                + stFile.getBusiBatchNo() + File.separator
                + stFile.getSourceFileMd5() + File.separator;
        String partFileName = partNumber + "-" + stFile.getFilename() + "." + stFile.getExt();
        try {
            try {
                // lstat succeeds only when the chunk file already exists.
                channelSftp.lstat(url + partFileName);
                return Result.success("");
            } catch (SftpException e) {
                // Chunk not present yet — ensure the directory chain exists.
                FtpUtils.mkdirDir(channelSftp, url.split(File.separator), "", url.split(File.separator).length, 0);
            }
            byte[] splitFileByte = splitFile(file, partSize, partNumber - 1, chunkSize);
            FtpUtils.upload(channelSftp, url, splitFileByte, partFileName);
            return Result.success("");
        } catch (Exception e) {
            log.error("分片上传失败,{}", partFileName, e);
            return Result.error("上传失败", ResultCode.SYSTEM_ERROR);
        } finally {
            // Bug fix: the original only disconnected on the happy path,
            // leaking the connection on the "chunk already exists" early
            // return and on every failure.
            channelSftp.disconnect();
        }
    }

    /**
     * Extracts one chunk of the file as a byte array.
     *
     * @param file       source file
     * @param partSize   size of THIS chunk in bytes (equals chunkSize except
     *                   for a shorter final chunk)
     * @param partNumber zero-based chunk index
     * @param chunkSize  nominal chunk size in bytes (stride between chunk
     *                   start offsets)
     * @return the chunk's bytes
     * @throws RuntimeException if the file cannot be read
     */
    private byte[] splitFile(File file, long partSize, int partNumber, Long chunkSize) {
        byte[] fileBytes;
        try {
            fileBytes = Files.readAllBytes(file.toPath());
        } catch (IOException e) {
            // The original printed the trace and carried on with an empty
            // array, which then blew up in System.arraycopy; fail fast.
            throw new RuntimeException(e);
        }

        // Chunk n starts at n * chunkSize and spans partSize bytes.
        int startIdx = (int) (partNumber * chunkSize);
        // Bug fix: the original computed endIdx as (partNumber + 1) * partSize,
        // which is wrong whenever partSize < chunkSize (the final chunk) and
        // could even yield a negative block size. The chunk ends at
        // start + partSize, clamped to the file length.
        int endIdx = (int) Math.min(startIdx + partSize, fileBytes.length);
        int blockSize = endIdx - startIdx;

        byte[] partBytes = new byte[blockSize];
        System.arraycopy(fileBytes, startIdx, partBytes, 0, blockSize);
        return partBytes;
    }


    /**
     * Returns one PartSummary per chunk already staged for this file, so the
     * caller knows how many chunks to skip when resuming.
     * <p>
     * NOTE(review): JSch {@code lstat()} either returns a non-null SftpATTRS
     * or throws SftpException, so the {@code lstat == null} branch appears
     * unreachable — the chunk listing was probably meant to run when the
     * final file is ABSENT (i.e. in the SftpException handler). Also note the
     * listing uses {@code java.io.File} on the local filesystem, which only
     * works when the NAS path is locally mounted. Behavior preserved; confirm
     * intent before changing.
     *
     * @param stFile      file record (final file path, batch number, md5)
     * @param stEquipment storage device configuration
     * @return summaries of the chunks found (empty when none or on error)
     */
    private List<PartSummary> getExistingPartETagsByFTP(StFile stFile, StEquipment stEquipment) {
        ChannelSftp channelSftp = null;
        List<PartSummary> exitPartList = new ArrayList<>();
        try {
            channelSftp = FtpUtils.getConnect(stEquipment.getDomainName().split(":")[0], Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret());
            SftpATTRS lstat = channelSftp.lstat(stFile.getFilePath());

            if (lstat == null) {
                // List the staged chunk files for this batch/md5.
                File file = new File(stEquipment.getBasePath() + temporaryFolder + File.separator
                        + stFile.getBusiBatchNo() + File.separator
                        + stFile.getFileMd5());
                File[] partFiles = file.listFiles();
                // listFiles() returns null when the directory does not exist.
                if (partFiles != null) {
                    extracted(exitPartList, partFiles);
                }
            }
        } catch (SftpException e) {
            log.error("没有文件,{}",e);
        } catch (Exception e) {
            log.error("ftp连接失败,{}",e);
        } finally {
            // Bug fix: guard against NPE when getConnect() itself failed and
            // channelSftp is still null.
            if (channelSftp != null) {
                channelSftp.disconnect();
            }
        }
        return exitPartList;
    }

    /**
     * Checks whether the chunk with the given part number already appears in
     * the list of uploaded chunks.
     *
     * @param partETags  summaries of the chunks known to be uploaded (may be null)
     * @param partNumber 1-based chunk number to look for
     * @return true if the chunk is already uploaded
     */
    private boolean isPartUploadedByFTP(List<PartSummary> partETags, int partNumber) {
        if (partETags == null || partETags.isEmpty()) {
            return false;
        }
        return partETags.stream().anyMatch(summary -> summary.getPartNumber() == partNumber);
    }

    /**
     * Decides whether the NAS upload can proceed to the merge step.
     * <p>
     * NOTE(review): {@code lstat()} throws when the path does not exist, and
     * the original fell through to {@code return true} after catching that —
     * so "final file absent" yields true, which is what nasUploadBack relies
     * on to trigger the merge. The method name does not match what it
     * actually checks; behavior preserved — confirm intent.
     *
     * @param stFile      file record (final file path)
     * @param stEquipment storage device configuration
     * @return false only when the final file already exists or lstat returned null
     */
    private boolean isMultipartUploadCompleteByFTP(StFile stFile, StEquipment stEquipment) {
        ChannelSftp channelSftp = null;
        try {
            channelSftp = FtpUtils.getConnect(stEquipment.getDomainName().split(":")[0], Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret());
            SftpATTRS lstat = channelSftp.lstat(stFile.getFilePath());

            if (lstat == null) {
                return false;
            }
        } catch (SftpException e) {
            // Path absent — fall through to return true (see NOTE above).
            log.error("没有文件,{}",e);
        } catch (Exception e) {
            log.error("ftp连接失败,{}",e);
        } finally {
            // Bug fix: guard against NPE when getConnect() itself failed.
            if (channelSftp != null) {
                channelSftp.disconnect();
            }
        }

        return true;
    }

    /**
     * Merges the staged chunk files on the NAS into the final file, then
     * deletes the chunk directory.
     * <p>
     * Polls (up to 10 × 1s) for the chunk directory to become visible,
     * downloads every chunk stream, concatenates them in part-number order,
     * re-uploads the merged bytes, and finally removes the chunks.
     *
     * @param busiBatchNo   business batch number (first path segment of the chunk dir)
     * @param sourceFileMd5 md5 of the source file (second path segment)
     * @param fileNames     final file name (with extension)
     * @param stEquipment   storage device configuration
     * @return success, or an error Result when connection or polling fails
     */
    private Result merge(String busiBatchNo, String sourceFileMd5, String fileNames, StEquipment stEquipment) {
        ChannelSftp channelSftp;
        try {
            channelSftp = FtpUtils.getConnect(stEquipment.getDomainName().split(":")[0], Integer.valueOf(stEquipment.getDomainName().split(":")[1]), stEquipment.getAccessKey(), stEquipment.getAccessSecret());
        } catch (Exception e){
            log.info("服务器连接失败。");
            return Result.error("连接出错，请核对相关配置", ResultCode.SYSTEM_ERROR);
        }
        String url = stEquipment.getBasePath() + File.separator + temporaryFolder + File.separator
                + busiBatchNo + File.separator
                + sourceFileMd5 + File.separator;
        String lastFile = stEquipment.getBasePath() + File.separator + DateUtils.getDate();
        try {
            // Poll up to 10 times, 1s apart, for the chunk directory to appear
            // (stat throws while it does not exist yet).
            boolean missing = true;
            int count = 10;
            while (missing && count > 0) {
                try {
                    --count;
                    Thread.sleep(1000);
                    channelSftp.stat(url);
                    missing = false;
                } catch (Exception ignored) {
                    // Directory not visible yet — retry.
                }
            }
            if (missing) {
                return Result.error("文件合并失败："+url,ResultCode.SYSTEM_ERROR);
            }
            Vector ls = channelSftp.ls(url);
            Map<String,InputStream> splitFileMap = new HashMap<>();
            for (Object element : ls) {
                ChannelSftp.LsEntry file = (ChannelSftp.LsEntry) element;
                String fileName = file.getFilename();
                // Chunk files are named "<partNumber>-<name>.<ext>".
                if (fileName.contains("-")) {
                    splitFileMap.put(fileName, channelSftp.get(url + fileName));
                }
            }
            // Order by part number — wrong order would corrupt the result.
            Vector<InputStream> vector = sortSplitFile(splitFileMap);
            ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
            // Bug fix: the original never closed the SequenceInputStream,
            // leaking every underlying SFTP chunk stream. Closing it here
            // also closes all remaining constituent streams.
            try (SequenceInputStream sequenceInputStream = new SequenceInputStream(vector.elements())) {
                byte[] buffer = new byte[1024];
                int bytesRead;
                while ((bytesRead = sequenceInputStream.read(buffer)) != -1) {
                    byteArrayOutputStream.write(buffer, 0, bytesRead);
                }
            }
            FtpUtils.upload(channelSftp, lastFile, byteArrayOutputStream.toByteArray(), fileNames);
            log.info("合并完成!");
            // Remove the now-redundant chunk files and their directory.
            delSplit(channelSftp, url);
        } catch (Exception e) {
            throw new RuntimeException(e);
        } finally {
            channelSftp.disconnect();
        }
        return Result.success("");
    }

    /**
     * Orders chunk streams by the numeric prefix of their file names
     * ("1-...", "2-...", ...). A wrong order would corrupt the merged file.
     *
     * @param map chunk file name -> chunk content stream
     * @return the streams in ascending part-number order
     */
    private Vector<InputStream> sortSplitFile(Map<String, InputStream> map){
        List<Entry<String, InputStream>> entries = new ArrayList<>(map.entrySet());

        // Sort on the integer before the first "-" in each file name.
        entries.sort(Comparator.comparingInt(entry -> Integer.parseInt(entry.getKey().split("-")[0])));

        Vector<InputStream> ordered = new Vector<>();
        for (Entry<String, InputStream> entry : entries) {
            ordered.add(entry.getValue());
        }

        return ordered;
    }

    /**
     * Deletes every regular file inside the given remote directory, then
     * removes the directory itself. Failures are logged, not propagated.
     *
     * @param channelSftp open SFTP channel (not closed by this method)
     * @param url         remote directory path, ending with a separator
     */
    private void delSplit(ChannelSftp channelSftp, String url) {
        try {
            // Enumerate the directory's entries.
            Vector ls = channelSftp.ls(url);
            for (Object element : ls) {
                ChannelSftp.LsEntry file = (ChannelSftp.LsEntry) element;
                String fileName = file.getFilename();
                // Only remove plain files; subdirectory entries (including
                // "." and "..") are left alone.
                if (!file.getAttrs().isDir()) {
                    channelSftp.rm(url + fileName);
                    log.info("删除文件: {}", fileName);
                }
            }
            // Directory is empty now — remove it.
            channelSftp.rmdir(url);
            log.info("文件夹及其所有文件已删除: {}", url);
        } catch (Exception e) {
            log.info("删除分片文件失败: {}", url);
            e.printStackTrace();
        }
    }


}
