package com.hzw.saas.service.storage.service;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.exceptions.ExceptionUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.thread.ThreadUtil;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.hzw.saas.api.storage.ICatalogService;
import com.hzw.saas.api.storage.enums.ResourceStatusEnum;
import com.hzw.saas.api.storage.enums.StorageSpaceEnum;
import com.hzw.saas.common.config.util.RedisUtil;
import com.hzw.saas.common.storage.enums.StorageModeEnum;
import com.hzw.saas.common.storage.service.IFileStorageService;
import com.hzw.saas.common.storage.service.IS3FileService;
import com.hzw.saas.common.util.enums.IsEnum;
import com.hzw.saas.common.util.exception.ConflictException;
import com.hzw.saas.common.util.exception.LostException;
import com.hzw.saas.service.storage.mapper.ResourceFileMapper;
import com.hzw.saas.service.storage.mapper.ResourceFolderMapper;
import com.hzw.saas.service.storage.model.ResourceFile;
import com.hzw.saas.service.storage.model.ResourceFolder;
import com.hzw.saas.service.storage.pojo.FileStorageInfo;
import com.hzw.saas.service.storage.util.StorageUtil;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.io.File;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;

/**
 * Background resource-migration tasks for the storage service:
 * <ul>
 *   <li>migrating cold files from local disk to S3 ({@link #migrateFiles2Cloud}),</li>
 *   <li>physically clearing files whose DB records are deleted ({@link #clearDeletedFiles}),</li>
 *   <li>combined local-catalog + S3 migration ({@link #migrateFiles2CloudAnd2NewCatalog}),</li>
 *   <li>one-off separation of HBox and DeviceStudio folders/files ({@link #separateHboxAndDS}).</li>
 * </ul>
 *
 * <p>Each public method returns immediately: the real work runs on a background
 * thread via {@link ThreadUtil#execute}. A Redisson method lock plus a Redis
 * task flag ensure that at most one instance of each task runs cluster-wide
 * (see {@link #tryMethodLock}).
 *
 * @author zzl
 * @since 10/22/2021
 */
@Service
@Slf4j
@RequiredArgsConstructor
public class ResourceMigrateService {

    // Short-lived Redisson lock guarding the check-and-set of the task flag.
    private static final String METHOD_LOCK_MIGRATE_KEY = "METHOD_LOCK:migrateFiles";
    // Long-lived Redis flag marking "task in progress"; deleted in the task's finally block.
    // NOTE(review): the flag is set without a TTL — if the JVM dies mid-task the key
    // is never removed and the task is wedged until someone deletes it manually. Confirm
    // whether an expiry (or Redisson lease) is acceptable here.
    private static final String TASK_LOCK_MIGRATE_KEY = "TASK_LOCK:migrateFiles";
    private static final String METHOD_LOCK_CLEAR_KEY = "METHOD_LOCK:clearFiles";
    private static final String TASK_LOCK_CLEAR_KEY = "TASK_LOCK:clearFiles";

    private static final String METHOD_LOCK_SEPARATE_DS_KEY = "METHOD_LOCK:separateDS";
    private static final String TASK_LOCK_SEPARATE_DS_KEY = "TASK_LOCK:separateDS";

    // Distributed-lock client
    private final RedissonClient redissonClient;

    // Storage services
    private final ICatalogService catalogService;
    private final IFileStorageService fileStorageService;
    private final IS3FileService s3FileService;

    private final StorageModeHandler storageModeHandler;

    @Resource(name = ResourceFileServiceImpl.BEAN_NAME)
    private ServiceImpl<ResourceFileMapper, ResourceFile> fileService;
    @Resource(name = ResourceFolderServiceImpl.BEAN_NAME)
    private ServiceImpl<ResourceFolderMapper, ResourceFolder> folderService;


    /**
     * Asynchronously migrates cold files (paged by {@code pageColdFiles}) from
     * local storage to S3.
     *
     * @param spaceIds    storage spaces to scan; semantics of null/empty depend on the mapper
     * @param geDate      lower bound (inclusive) of the file date range
     * @param ltDate      upper bound (exclusive) of the file date range
     * @param threadCount number of worker threads for {@link ResourceMigrateExecutor}
     */
    public void migrateFiles2Cloud(List<String> spaceIds, Date geDate, Date ltDate, Integer threadCount) {
        // true means another invocation already holds the task; silently skip.
        if (tryMethodLock(METHOD_LOCK_MIGRATE_KEY, TASK_LOCK_MIGRATE_KEY)) return;

        ThreadUtil.execute(() -> {
            try {
                String taskName = "migrate2Cloud";

                ResourceMigrateExecutor.PageFilesHandler pageFilesHandler = page
                    -> fileService.getBaseMapper().pageColdFiles(page, spaceIds, geDate, ltDate);
                ResourceMigrateExecutor.DealStorageHandler dealFileHandler = file
                    -> storageModeHandler.dealFileStorage(
                    new FileStorageInfo(file.getCatalog(), file.getResourceKey(), file.getIsLocal(), file.getIsCloud())
                    , StorageModeEnum.SAVE_IN_CLOUD);

                log.debug("文件迁移(本地->S3)任务开始({geDate:{},ltDate:{}}...", DateUtil.formatDate(geDate), DateUtil.formatDate(ltDate));
                ResourceMigrateExecutor.execute(threadCount, taskName, pageFilesHandler, dealFileHandler);
                log.debug("文件迁移(本地->S3)任务结束({geDate:{},ltDate:{}}.", DateUtil.formatDate(geDate), DateUtil.formatDate(ltDate));
            } finally {
                // Release the cluster-wide task flag whether the task succeeded or not.
                RedisUtil.del(TASK_LOCK_MIGRATE_KEY);
            }
        });
    }

    /**
     * Asynchronously removes the physical storage of files whose DB records are
     * marked deleted (paged by {@code pageDeletedFiles}).
     *
     * @param spaceIds    storage spaces to scan
     * @param geDate      lower bound (inclusive) of the deletion date range
     * @param ltDate      upper bound (exclusive) of the deletion date range
     * @param threadCount number of worker threads
     */
    public void clearDeletedFiles(List<String> spaceIds, Date geDate, Date ltDate, Integer threadCount) {
        if (tryMethodLock(METHOD_LOCK_CLEAR_KEY, TASK_LOCK_CLEAR_KEY)) return;

        ThreadUtil.execute(() -> {
            try {
                String taskName = "fileClear";
                // FIXME: ZZL@10/26/2021 the paged query misbehaves when rows are deleted
                // from the database while paging (page offsets shift under the cursor).
                ResourceMigrateExecutor.PageFilesHandler pageFilesHandler = page
                    -> fileService.getBaseMapper().pageDeletedFiles(page, spaceIds, geDate, ltDate);
                ResourceMigrateExecutor.DealStorageHandler dealFileHandler = file
                    -> storageModeHandler.dealFileStorage(
                    new FileStorageInfo(file.getCatalog(), file.getResourceKey(), file.getIsLocal(), file.getIsCloud()),
                    StorageModeEnum.CLEAR_FILE);
                log.debug("文件清理任务开始...");
                ResourceMigrateExecutor.execute(threadCount, taskName, pageFilesHandler, dealFileHandler);
                log.debug("文件清理任务结束.");
            } finally {
                RedisUtil.del(TASK_LOCK_CLEAR_KEY);
            }
        });
    }

    /**
     * Asynchronously runs a two-phase migration sharing the migrate task lock:
     * <ol>
     *   <li>if {@code newCatalog} is given, copies hot files into the new local
     *       catalog ({@code pageHotFiles});</li>
     *   <li>if {@code beforeDate2Cloud} is given, uploads cold files (before that
     *       date) to S3 ({@code pageColdFiles}).</li>
     * </ol>
     *
     * @param spaceIds         storage spaces to scan
     * @param geDate           lower bound (inclusive) for the S3 phase
     * @param ltDate           upper bound (exclusive) for the local-copy phase
     * @param beforeDate2Cloud cutoff: files before it go to S3, files at/after it stay local
     * @param newCatalog       target local catalog; blank skips the local-copy phase
     * @param threadCount      number of worker threads per phase
     */
    public void migrateFiles2CloudAnd2NewCatalog(List<String> spaceIds, Date geDate, Date ltDate, Date beforeDate2Cloud, String newCatalog, Integer threadCount) {
        if (tryMethodLock(METHOD_LOCK_MIGRATE_KEY, TASK_LOCK_MIGRATE_KEY)) return;

        ThreadUtil.execute(() -> {
            try {
                String taskMigrateLocal2Local = "migrateLocal2Local";
                String taskMigrateLocal2Cloud = "migrateLocal2Cloud";

                // Phase 1: local catalog copy. When beforeDate2Cloud is non-null only
                // data at/after that time (the "hot" set) is copied locally.
                if (StrUtil.isNotBlank(newCatalog)) {
                    ResourceMigrateExecutor.PageFilesHandler pageFilesHandlerLocally = page
                        -> fileService.getBaseMapper().pageHotFiles(page, spaceIds, beforeDate2Cloud, ltDate);
                    ResourceMigrateExecutor.DealStorageHandler dealFileHandler2Locally = resourceFile
                        -> this.migrateFileLocal2Local(resourceFile.getCatalog(), newCatalog, resourceFile.getResourceKey());

                    log.debug("文件迁移(本地复制)任务开始，newCatalog: {},beforeDate: {} ...", newCatalog, DateUtil.formatDate(beforeDate2Cloud));
                    ResourceMigrateExecutor.execute(threadCount, taskMigrateLocal2Local, pageFilesHandlerLocally, dealFileHandler2Locally);
                    log.debug("文件迁移(本地复制)任务结束.");
                }

                // Phase 2: upload the cold set (before beforeDate2Cloud) to S3.
                if (Objects.nonNull(beforeDate2Cloud)) {
                    ResourceMigrateExecutor.PageFilesHandler pageFilesHandler2Cloud = page
                        -> fileService.getBaseMapper().pageColdFiles(page, spaceIds, geDate, beforeDate2Cloud);
                    ResourceMigrateExecutor.DealStorageHandler dealFileHandler2Cloud = resourceFile
                        -> this.migrateFileLocal2Cloud(resourceFile.getCatalog(), newCatalog, resourceFile.getResourceKey());

                    log.debug("文件迁移(上传S3)任务开始，newCatalog: {},beforeDate: {} ...", newCatalog, DateUtil.formatDate(beforeDate2Cloud));
                    ResourceMigrateExecutor.execute(threadCount, taskMigrateLocal2Cloud, pageFilesHandler2Cloud, dealFileHandler2Cloud);
                    log.debug("文件迁移(上传S3)任务结束.");
                }
            } finally {
                RedisUtil.del(TASK_LOCK_MIGRATE_KEY);
            }
        });
    }

    /**
     * Asynchronously separates HBox and DeviceStudio resources: reassigns space ids
     * on DS folder trees, fixes hpf-file records, and normalizes root sup-ids.
     *
     * @param threadCount worker threads for the DS-folder scan; values &le; 0 (or null) fall back to 1
     */
    public void separateHboxAndDS(Integer threadCount) {
        if (tryMethodLock(METHOD_LOCK_SEPARATE_DS_KEY, TASK_LOCK_SEPARATE_DS_KEY)) return;

        ThreadUtil.execute(() -> {
            ExecutorService executor = null;
            try {
                int pageSize = 1000;
                int pageNum = 1;
                // hutool's null-safe compare: null threadCount falls back to 1 worker.
                int tc = ObjectUtil.compare(threadCount, 0) > 0 ? threadCount : 1;

                executor = ThreadUtil.newExecutor(tc);
                Queue<ResourceFolder> dsFolderQueue = new ConcurrentLinkedQueue<>();

                // Collected from multiple worker threads, hence the synchronized wrapper.
                List<String> folderIds = Collections.synchronizedList(new ArrayList<>());

                log.debug("分离HBox和DS文件与目录开始...");
                // task start
                long start = System.currentTimeMillis();
                // Handle the HBox space. The update is run repeatedly so the space id
                // propagates one tree level per pass.
                // NOTE(review): the fixed 10 passes cap propagation at 10 levels — confirm
                // that is deep enough for the folder hierarchy.
                for (int i = 0; i < 10; i++) {
                    folderService.getBaseMapper().updateHboxSpaceId();
                }

                log.debug("处理hpf文件同名目录...");
                while (true) {
                    IPage<ResourceFolder> result = folderService.getBaseMapper().pageDsFolders(new Page<>(pageNum, pageSize));
                    if (CollUtil.isEmpty(result.getRecords())) {
                        break;
                    }
                    dsFolderQueue.addAll(result.getRecords());
                    // tc workers drain the shared queue; the latch gates the page boundary.
                    CountDownLatch countDownLatch = new CountDownLatch(tc);
                    for (int i = 0; i < tc; i++) {
                        executor.execute(() -> {
                            try {
                                ResourceFolder dsFolder;
                                while ((dsFolder = dsFolderQueue.poll()) != null) {
                                    folderIds.add(dsFolder.getPid());
                                    this.listSubFolderAndFiles(folderIds, dsFolder.getPid());
                                }
                            } finally {
                                countDownLatch.countDown();
                            }
                        });
                    }
                    try {
                        countDownLatch.await();
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so callers up the stack can observe it.
                        Thread.currentThread().interrupt();
                        log.error("分离HBox和DS文件与目录被打断");
                        throw ExceptionUtil.wrapRuntime(e);
                    }
                    this.updateDsSpaceId(folderIds);
                    folderIds.clear();
                    pageNum++;
                }
                // Handle DS projects that are not at a space root.
                log.debug("处理不处于空间根目录的DS项目...");
                folderIds.clear();
                List<ResourceFolder> specialFolders = folderService.getBaseMapper().listFolderContainsDsProject();
                for (ResourceFolder specialFolder : specialFolders) {
                    String topFolderId = this.findTopFolderId(specialFolder);
                    folderIds.add(topFolderId);
                    this.listSubFolderAndFiles(folderIds, topFolderId);
                }
                this.updateDsSpaceId(folderIds);

                // Reassign hpf files (DS project files) to the DS space.
                log.debug("更新DS项目hpf文件...");
                fileService.lambdaUpdate()
                    .likeLeft(ResourceFile::getName, ".hpf")
                    .set(ResourceFile::getSpaceId, StorageSpaceEnum.DEVICE_STUDIO_SPACE.getCode())
                    .update();

                // Normalize sup-ids of entries sitting at the root.
                log.debug("更新根目录...");
                folderService.getBaseMapper().updateSupIdInRoot();
                fileService.getBaseMapper().updateSupIdInRoot();
                long cost = System.currentTimeMillis() - start;
                log.debug("分离HBox和DS文件与目录完成, cost: {}ms", cost);
            } finally {
                // Shut the worker pool down so its threads don't leak across invocations.
                if (executor != null) {
                    executor.shutdown();
                }
                RedisUtil.del(TASK_LOCK_SEPARATE_DS_KEY);
            }
        });
    }

    /**
     * Atomically claims the given task flag.
     *
     * @param methodLockKey Redisson lock key serializing the check-and-set below
     * @param taskLockKey   Redis flag marking the task as in progress
     * @return {@code true} if the caller should skip (another node holds the method
     *         lock right now); {@code false} if the task flag was claimed and the
     *         caller must delete {@code taskLockKey} when done
     * @throws ConflictException if the task flag already exists (task in progress)
     */
    private boolean tryMethodLock(String methodLockKey, String taskLockKey) {
        RLock lock = redissonClient.getLock(methodLockKey);
        if (!lock.tryLock()) {
            // NOTE(review): a concurrent caller is silently skipped here (no exception),
            // while an already-running task raises ConflictException below — confirm the
            // asymmetry is intended.
            return true;
        }
        try {
            if (RedisUtil.hasKey(taskLockKey)) {
                throw new ConflictException("任务正在进行中...");
            } else {
                RedisUtil.set(taskLockKey, "1");
            }
        } finally {
            lock.unlock();
        }
        return false;
    }

    /**
     * Recursively collects the pids of every non-deleted descendant folder of
     * {@code supId} into {@code folderIds} (depth-first, one query per level).
     *
     * @param folderIds accumulator; must be thread-safe when shared across workers
     * @param supId     parent folder id whose subtree is walked
     */
    private void listSubFolderAndFiles(List<String> folderIds, String supId) {
        List<Integer> deleted = ResourceStatusEnum.DELETED.groupCodes();
        List<ResourceFolder> folderList = folderService.lambdaQuery()
            .select(ResourceFolder::getPid)
            .eq(ResourceFolder::getSupId, supId)
            .notIn(ResourceFolder::getStatus, deleted)
            .list();
        for (ResourceFolder folder : folderList) {
            folderIds.add(folder.getPid());
            this.listSubFolderAndFiles(folderIds, folder.getPid());
        }
    }

    /**
     * Walks up the folder hierarchy and returns the id of the top-most ancestor
     * (the folder whose sup-id is a root id).
     *
     * <p>NOTE(review): if a folder's parent record is missing (orphaned chain),
     * {@code supFolder} is null and the recursive call NPEs — confirm orphans
     * cannot occur here.
     *
     * @param folder starting folder
     * @return pid of the top-level ancestor folder
     */
    private String findTopFolderId(ResourceFolder folder) {
        if (StorageUtil.isRootId(folder.getSupId())) {
            return folder.getPid();
        }
        ResourceFolder supFolder = folderService.lambdaQuery()
            .eq(ResourceFolder::getPid, folder.getSupId())
            .one();
        return this.findTopFolderId(supFolder);
    }

    /**
     * Moves the given folders (by pid) and their directly-contained files (by
     * sup-id) into the DeviceStudio space.
     *
     * @param folderIds folder pids to reassign; also used as file sup-ids
     */
    private void updateDsSpaceId(List<String> folderIds) {
        folderService.lambdaUpdate().in(ResourceFolder::getPid, folderIds)
            .set(ResourceFolder::getSpaceId, StorageSpaceEnum.DEVICE_STUDIO_SPACE.getCode())
            .update();
        fileService.lambdaUpdate().in(ResourceFile::getSupId, folderIds)
            .set(ResourceFile::getSpaceId, StorageSpaceEnum.DEVICE_STUDIO_SPACE.getCode())
            .update();
    }

    /**
     * Copies one file from {@code originCatalog} to {@code targetCatalog} on
     * local storage, then updates the DB record's catalog and isLocal/isCloud
     * flags when the catalog actually changed.
     *
     * @param originCatalog source catalog; blank aborts with an error log
     * @param targetCatalog destination catalog; blank is a no-op
     * @param resourceKey   storage key of the file; blank aborts with an error log
     * @throws LostException if the file exists neither locally nor on S3
     */
    private void migrateFileLocal2Local(String originCatalog, String targetCatalog, String resourceKey) {
        if (StrUtil.isBlank(targetCatalog)) {
            return;
        }
        if (StrUtil.hasBlank(originCatalog, resourceKey)) {
            log.error("文件信息不完整：originCatalog: {}, resourceKey: {}", originCatalog, resourceKey);
            return;
        }
        File originLocalFile = fileStorageService.getFile(catalogService.getFilePath(originCatalog, resourceKey));
        File targetLocalFile = fileStorageService.getFile(catalogService.getFilePath(targetCatalog, resourceKey));

        // Fall back to the legacy path layout when the catalog-resolved path is absent.
        if (!FileUtil.exist(originLocalFile)) {
            originLocalFile = fileStorageService.getFile(originCatalog, resourceKey);
        }

        // File migration.
        // Target already present locally (and size matches, or the source is gone): skip copy.
        if (FileUtil.isFile(targetLocalFile) && (!FileUtil.isFile(originLocalFile) || Objects.equals(targetLocalFile.length(), originLocalFile.length()))) {
            log.debug("[migrateLocal2Local]文件已存在: {}", resourceKey);
        }
        // Target not present locally.
        else {
            // Source not present locally either.
            if (!FileUtil.isFile(originLocalFile)) {
                if (s3FileService.exists(resourceKey)) {
                    log.debug("[migrateLocal2Local]文件已存在于S3: {}", resourceKey);
                    return;
                }
                String errorMsg = StrUtil.format("{key: {}, localPath: {}, errorMsg: 本地文件丢失}!", resourceKey, originLocalFile);
                throw new LostException(errorMsg);
            }
            // Copy to the target catalog (overwrite).
            log.debug("[migrateLocal2Local]复制文件: {} -> {}", originLocalFile, targetLocalFile);
            FileUtil.copy(originLocalFile, targetLocalFile, true);
        }

        // Persist the new catalog and storage flags only when the catalog changed.
        if (!Objects.equals(originCatalog, targetCatalog)) {
            this.updateInfoAfterDealStorage(targetCatalog, resourceKey, IsEnum.TRUE.getCode(), IsEnum.FALSE.getCode());
        }

    }

    /**
     * Uploads one local file to S3 (skipping the upload if the key already exists
     * there), then updates the DB record's flags.
     *
     * @param originCatalog source catalog; blank aborts with an error log
     * @param targetCatalog catalog recorded after migration; blank defaults to {@code originCatalog}
     * @param resourceKey   storage key of the file; blank aborts with an error log
     * @throws LostException if the file is missing locally and not on S3
     */
    private void migrateFileLocal2Cloud(String originCatalog, String targetCatalog, String resourceKey) {
        if (StrUtil.hasBlank(originCatalog, resourceKey)) {
            log.error("文件信息不完整：originCatalog: {}, resourceKey: {}", originCatalog, resourceKey);
            return;
        }
        if (StrUtil.isBlank(targetCatalog)) {
            targetCatalog = originCatalog;
        }
        File originLocalFile = fileStorageService.getFile(catalogService.getFilePath(originCatalog, resourceKey));

        // Fall back to the legacy path layout when the catalog-resolved path is absent.
        if (!FileUtil.exist(originLocalFile)) {
            originLocalFile = fileStorageService.getFile(originCatalog, resourceKey);
        }

        // File upload.
        // Already on S3: nothing to upload.
        if (s3FileService.exists(resourceKey)) {
            log.debug("[migrateLocal2Cloud]文件已存在于S3: {}", resourceKey);
        }
        // Not on S3: upload from local.
        else {
            // Missing locally too: the file is lost.
            if (!FileUtil.isFile(originLocalFile)) {
                String errorMsg = StrUtil.format("{key: {}, localPath: {}, errorMsg: 本地文件丢失}!", resourceKey, originLocalFile);
                throw new LostException(errorMsg);
            }
            // Upload to S3.
            storageModeHandler.uploadToS3(resourceKey, originLocalFile);
        }

        // NOTE(review): this condition is the opposite of migrateFileLocal2Local's
        // !Objects.equals — when a distinct targetCatalog is supplied the DB flags are
        // never updated after the upload. Confirm whether that asymmetry is intended.
        if (Objects.equals(originCatalog, targetCatalog)) {
            this.updateInfoAfterDealStorage(targetCatalog, resourceKey, IsEnum.FALSE.getCode(), IsEnum.TRUE.getCode());
        }
    }

    /**
     * Updates the file record identified by {@code resourceKey}: each column is
     * written only when its new value is non-null/non-blank.
     *
     * @param catalog     new catalog, or blank to leave unchanged
     * @param resourceKey key identifying the record
     * @param isLocal     new isLocal flag, or null to leave unchanged
     * @param isCloud     new isCloud flag, or null to leave unchanged
     */
    private void updateInfoAfterDealStorage(String catalog, String resourceKey, Integer isLocal, Integer isCloud) {
        fileService.lambdaUpdate()
            .eq(ResourceFile::getResourceKey, resourceKey)
            .set(isLocal != null, ResourceFile::getIsLocal, isLocal)
            .set(isCloud != null, ResourceFile::getIsCloud, isCloud)
            .set(StrUtil.isNotBlank(catalog), ResourceFile::getCatalog, catalog)
            .update();
    }

}
