package com.southminority.ethnic.service.alioss;

import com.aliyun.oss.OSS;
import com.aliyun.oss.OSSClient;
import com.aliyun.oss.model.*;
import com.southminority.ethnic.controller.alioss.vo.*;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.codec.digest.DigestUtils;
import org.joda.time.DateTime;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Chunked (multipart) upload service backed by Aliyun OSS.
 *
 * <p>Upload flow: {@link #initChunkUpload} starts a multipart upload and returns an
 * {@code uploadId}; {@link #uploadChunk} uploads each part (verifying its MD5) and
 * records the returned ETag; {@link #mergeChunks} completes the multipart upload and
 * returns the resulting file URL; {@link #cancelChunkUpload} aborts an in-progress
 * upload so OSS releases the partial parts.
 */
@Slf4j
@Service
public class OssChunkUploadServiceImpl implements OssChunkUploadService {

    @Value("${aliyun.oss.file.endpoint:default-endpoint}")
    private String endpoint;
    @Value("${aliyun.oss.file.keyid:default-keyid}")
    private String accessKeyId;
    @Value("${aliyun.oss.file.keysecret:default-keysecret}")
    private String accessKeySecret;
    @Value("${aliyun.oss.file.bucketname:default-bucket}")
    private String bucketName;

    /** Chunk size advertised to clients: 1 MB. */
    private static final long CHUNK_SIZE = 1024 * 1024L;

    /**
     * In-memory store of in-progress uploads, keyed by uploadId.
     *
     * <p>NOTE(review): entries for abandoned uploads are never evicted, so this map
     * grows without bound and all state is lost on restart. As the original comment
     * noted, production code should keep this in Redis or a database, plus a
     * {@code createTime}-based sweeper that aborts stale uploads.
     */
    private static final Map<String, UploadContext> uploadContextMap = new ConcurrentHashMap<>();

    /**
     * Per-upload bookkeeping: identity of the target OSS object plus the ETag of
     * every part uploaded so far ({@code chunkETags}: 0-based chunk index -> ETag).
     */
    private static class UploadContext {
        String uploadId;
        String fileMd5;
        String fileName;
        String className;
        String objectKey;
        Map<Integer, String> chunkETags = new ConcurrentHashMap<>();
        long createTime;

        UploadContext(String uploadId, String fileMd5, String fileName, String className, String objectKey) {
            this.uploadId = uploadId;
            this.fileMd5 = fileMd5;
            this.fileName = fileName;
            this.className = className;
            this.objectKey = objectKey;
            this.createTime = System.currentTimeMillis();
        }
    }

    /**
     * Initializes a multipart upload on OSS and registers an {@link UploadContext}.
     *
     * @param req init request carrying fileName, fileMd5 and an optional className
     *            (falls back to {@code "isNullClass"} when blank)
     * @return the new uploadId plus the chunk size clients should use
     * @throws RuntimeException if OSS is unconfigured or the OSS call fails
     */
    @Override
    public OssChunkInitRespVo initChunkUpload(OssChunkInitReqVo req) {
        checkOssConfig();

        String className = req.getClassName();
        if (className == null || className.isEmpty()) {
            className = "isNullClass";
        }

        OSS ossClient = new OSSClient(endpoint, accessKeyId, accessKeySecret);
        try {
            String objectKey = generateObjectKey(className, req.getFileName());

            InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, objectKey);
            InitiateMultipartUploadResult result = ossClient.initiateMultipartUpload(request);
            String uploadId = result.getUploadId();

            uploadContextMap.put(uploadId,
                    new UploadContext(uploadId, req.getFileMd5(), req.getFileName(), className, objectKey));

            log.info("初始化分片上传成功: uploadId={}, objectKey={}, fileMd5={}, fileName={}",
                    uploadId, objectKey, req.getFileMd5(), req.getFileName());

            return OssChunkInitRespVo.builder()
                    .uploadId(uploadId)
                    .chunkSize(CHUNK_SIZE)
                    .build();

        } catch (Exception e) {
            log.error("初始化分片上传失败", e);
            // Preserve the root cause so callers see the real failure, not just its message.
            throw new RuntimeException("初始化分片上传失败: " + e.getMessage(), e);
        } finally {
            // Always release the client, even when the OSS call throws.
            ossClient.shutdown();
        }
    }

    /**
     * Uploads one chunk as a multipart part after verifying its MD5 against the
     * value the client sent, and records the part's ETag in the upload context.
     *
     * @param req carries uploadId, 0-based chunkIndex, totalChunks, chunkMd5 and the chunk file
     * @return upload status including the ETag and an integer progress percentage
     * @throws RuntimeException if the uploadId is unknown, the MD5 check fails, or OSS fails
     */
    @Override
    public OssChunkUploadRespVo uploadChunk(OssChunkUploadReqVo req) {
        checkOssConfig();

        try {
            String uploadId = req.getUploadId();
            UploadContext context = uploadContextMap.get(uploadId);
            if (context == null) {
                throw new RuntimeException("上传ID不存在或已过期");
            }

            // Read the chunk exactly once; the same bytes back both the MD5 check and
            // the OSS upload (the original consumed the multipart stream twice).
            byte[] chunkBytes = req.getChunk().getBytes();

            String calculatedMd5 = calculateMd5(new ByteArrayInputStream(chunkBytes));
            if (!calculatedMd5.equals(req.getChunkMd5())) {
                throw new RuntimeException("分片MD5验证失败，文件可能已损坏");
            }

            OSS ossClient = new OSSClient(endpoint, accessKeyId, accessKeySecret);
            String eTag;
            try {
                UploadPartRequest uploadPartRequest = new UploadPartRequest();
                uploadPartRequest.setBucketName(bucketName);
                uploadPartRequest.setKey(context.objectKey);
                uploadPartRequest.setUploadId(uploadId);
                uploadPartRequest.setPartNumber(req.getChunkIndex() + 1); // OSS part numbers are 1-based
                uploadPartRequest.setInputStream(new ByteArrayInputStream(chunkBytes));
                uploadPartRequest.setPartSize(chunkBytes.length);
                eTag = ossClient.uploadPart(uploadPartRequest).getETag();
            } finally {
                ossClient.shutdown();
            }

            context.chunkETags.put(req.getChunkIndex(), eTag);

            // Widen to long before multiplying to avoid int overflow for huge chunk counts.
            int progress = (int) (context.chunkETags.size() * 100L / req.getTotalChunks());

            log.info("上传分片成功: uploadId={}, chunkIndex={}, progress={}%", uploadId, req.getChunkIndex(), progress);

            return OssChunkUploadRespVo.builder()
                    .uploadId(uploadId)
                    .chunkIndex(req.getChunkIndex())
                    .eTag(eTag)
                    .progress(progress)
                    .completed(false)
                    .build();

        } catch (Exception e) {
            log.error("上传分片失败", e);
            throw new RuntimeException("上传分片失败: " + e.getMessage(), e);
        }
    }

    /**
     * Completes the multipart upload once all parts are present, removes the upload
     * context, and returns the public URL of the merged object.
     *
     * @param req carries uploadId and the expected totalChunks
     * @return URL of the merged file ({@code http://bucket.endpoint/key})
     * @throws RuntimeException if the uploadId is unknown, parts are missing, or OSS fails
     */
    @Override
    public String mergeChunks(OssChunkMergeReqVo req) {
        checkOssConfig();

        try {
            String uploadId = req.getUploadId();
            UploadContext context = uploadContextMap.get(uploadId);
            if (context == null) {
                throw new RuntimeException("上传ID不存在或已过期");
            }

            if (context.chunkETags.size() != req.getTotalChunks()) {
                throw new RuntimeException("分片上传不完整，缺少" +
                        (req.getTotalChunks() - context.chunkETags.size()) + "个分片");
            }

            // Rebuild the part list in ascending part-number order, as OSS requires.
            List<PartETag> partETags = new ArrayList<>();
            for (int i = 0; i < req.getTotalChunks(); i++) {
                String eTag = context.chunkETags.get(i);
                if (eTag == null) {
                    throw new RuntimeException("分片" + i + "的ETag不存在");
                }
                partETags.add(new PartETag(i + 1, eTag));
            }

            OSS ossClient = new OSSClient(endpoint, accessKeyId, accessKeySecret);
            CompleteMultipartUploadResult result;
            try {
                CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                        bucketName,
                        context.objectKey,
                        uploadId,
                        partETags
                );
                result = ossClient.completeMultipartUpload(completeRequest);
            } finally {
                ossClient.shutdown();
            }

            String fileUrl = "http://" + bucketName + "." + endpoint + "/" + result.getKey();

            // The upload is finished; drop its bookkeeping.
            uploadContextMap.remove(uploadId);

            log.info("合并分片成功: uploadId={}, fileUrl={}", uploadId, fileUrl);

            return fileUrl;

        } catch (Exception e) {
            log.error("合并分片失败", e);
            throw new RuntimeException("合并分片失败: " + e.getMessage(), e);
        }
    }

    /**
     * Aborts an in-progress multipart upload on OSS and discards its context.
     * A no-op when the uploadId is unknown (already finished or never started).
     *
     * @param uploadId the multipart upload to abort
     * @throws RuntimeException if OSS is unconfigured or the abort call fails
     */
    @Override
    public void cancelChunkUpload(String uploadId) {
        checkOssConfig();

        try {
            UploadContext context = uploadContextMap.get(uploadId);
            if (context == null) {
                return;
            }

            OSS ossClient = new OSSClient(endpoint, accessKeyId, accessKeySecret);
            try {
                AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(
                        bucketName,
                        context.objectKey,
                        uploadId
                );
                ossClient.abortMultipartUpload(abortRequest);
            } finally {
                ossClient.shutdown();
            }

            uploadContextMap.remove(uploadId);

            log.info("取消分片上传成功: uploadId={}", uploadId);

        } catch (Exception e) {
            log.error("取消分片上传失败", e);
            throw new RuntimeException("取消分片上传失败: " + e.getMessage(), e);
        }
    }

    /**
     * Looks up an already-uploaded file by its MD5 (instant-upload support).
     *
     * @param fileMd5 MD5 digest of the whole file
     * @return always {@code null} in this simplified implementation; a real
     *         implementation would query persistent storage for an existing URL
     */
    @Override
    public String checkFileExists(String fileMd5) {
        // Simplified implementation; a real one would query the database.
        return null;
    }

    /**
     * Builds the OSS object key: {@code gzmu_ssmz/<className>/yyyy/MM/dd/<uuid><suffix>}.
     * The random UUID prevents collisions between files with the same name.
     */
    private String generateObjectKey(String className, String fileName) {
        String uuid = UUID.randomUUID().toString().replace("-", "");
        String suffix = getFileSuffix(fileName);
        return "gzmu_ssmz/" + className + "/" + DateTime.now().toString("yyyy/MM/dd/") + uuid + suffix;
    }

    /**
     * Returns the file extension including the leading dot (e.g. {@code ".png"}),
     * or the empty string when there is no usable extension.
     */
    private String getFileSuffix(String fileName) {
        if (fileName == null || fileName.isEmpty()) {
            return "";
        }
        int lastDotIndex = fileName.lastIndexOf('.');
        if (lastDotIndex == -1 || lastDotIndex == fileName.length() - 1) {
            return "";
        }
        return fileName.substring(lastDotIndex);
    }

    /**
     * Computes the hex MD5 digest of the stream's remaining bytes.
     * The stream is consumed but not closed (commons-codec behavior).
     */
    private String calculateMd5(java.io.InputStream inputStream) throws IOException {
        return DigestUtils.md5Hex(inputStream);
    }

    /**
     * Fails fast when any OSS property still holds its placeholder default,
     * i.e. real credentials were never configured in application.yml.
     */
    private void checkOssConfig() {
        if ("default-endpoint".equals(endpoint) || "default-keyid".equals(accessKeyId)
            || "default-keysecret".equals(accessKeySecret) || "default-bucket".equals(bucketName)) {
            throw new RuntimeException("OSS服务未配置，请在application.yml中配置真实的阿里云OSS参数");
        }
    }
}
