package com.sjgs.gis.fs;

import com.sjgs.common.ketama.DefaultHashAlgorithm;
import com.sjgs.common.ketama.KetamaNodeKeyFormatter;
import com.sjgs.common.ketama.KetamaNodeLocator;
import com.sjgs.common.utils.CronDateUtils;
import com.sjgs.common.utils.DirUtils;
import com.sjgs.common.utils.KryoUtils;
import com.sjgs.common.utils.UUIDUtils;
import com.sjgs.gis.common.FileSystemFactory;
import com.sjgs.gis.domain.Chunk;
import com.sjgs.gis.domain.DataFile;
import com.sjgs.gis.domain.Node;
import com.sjgs.gis.domain.ScheduleJob;
import com.sjgs.gis.domain.enumeration.ChunkStatus;
import com.sjgs.gis.domain.enumeration.DataFileStatus;
import com.sjgs.gis.domain.enumeration.DataFileType;
import com.sjgs.gis.errors.FileGrpcSyncException;
import com.sjgs.gis.errors.InvalidChunkException;
import com.sjgs.gis.fs.store.DistributedFileStore;
import com.sjgs.gis.fs.store.LocalFileStore;
import com.sjgs.gis.quartz.ScheduleJobService;
import com.sjgs.gis.service.ChunkService;
import com.sjgs.gis.service.DataFileService;
import com.sjgs.gis.utils.QETag;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.cloud.netflix.eureka.EurekaDiscoveryClient;
import org.springframework.http.MediaType;
import org.springframework.http.MediaTypeFactory;
import org.springframework.http.codec.multipart.FilePart;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Date;
import java.time.Duration;
import java.time.Instant;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.sjgs.common.Constants.PARAMS_SEPARATOR;
import static com.sjgs.common.Constants.SECOND;
import static com.sjgs.common.Constants.SEP;

/**
 * File service (文件服务): chunked upload, hash verification, merge into local
 * or distributed storage, and chunk-cache cleanup for data files.
 *
 * @author jxw
 */
@Service("fsService")
public class FileSystemService {

    private static final Log logger = LogFactory.getLog(FileSystemService.class);


    // Chunk metadata persistence (status, hash, index per uploaded chunk).
    @Autowired
    ChunkService chkservice;

    // Data-file metadata persistence (key, status, url, type).
    @Autowired
    private DataFileService dfservice;

    // Pulls chunks cached on other cluster nodes into the local cache.
    @Autowired
    ChunkSyncService syncService;

    // Identity of this node; instanceId is used to tag chunk/file locations.
    @Autowired
    Node currentNode;

    // NOTE(review): injected but unused in this file — possibly used elsewhere
    // or a leftover; confirm before removing.
    @Autowired
    EurekaDiscoveryClient discoveryClient;

    // Quartz-backed scheduler used for retrying merge/cleanup work.
    @Autowired
    private ScheduleJobService scheduleJobService;

    // Root URL of the distributed backing store (e.g. HDFS namenode).
    @Value("${filesystem.rootUrl:hdfs://localhost:9009}")
    private String rootUrl;

    // Local directory under which per-file chunk caches are created.
    @Value("${filesystem.rootDir:/data}")
    private String rootDir;

    // Retry count applied to status-check persistence operations.
    @Value("${filesystem.checkRetry:2}")
    private int checkRetry;

    // Retry count intended for write operations (not referenced in this file).
    @Value("${filesystem.writeRetry:5}")
    private int writeRetry;
    /**
     * Find every data-file record matching a file name.
     *
     * @param fname file name to search for
     * @return all matching {@link DataFile} records (may be empty)
     */
    public Flux<DataFile> queryFileByName(String fname) {
        // type flag convention: 1-local 2-remote
        final Flux<DataFile> matches = dfservice.findByFileName(fname);
        return matches;
    }

    /**
     * Look up a single data-file record by its key.
     *
     * @param fkey file key (content hash)
     * @return the matching {@link DataFile}, or empty if unknown
     */
    public Mono<DataFile> queryFile(String fkey) {
        // type flag convention: 1-local 2-remote
        final Mono<DataFile> record = dfservice.findByFkey(fkey);
        return record;
    }

    /**
     * Instant-upload ("秒传") probe: find a file by key whose status has moved
     * past PENDING.
     *
     * @param fkey file key (content hash)
     * @return the matching non-pending {@link DataFile}, or empty
     */
    public Mono<DataFile> checkFile(String fkey) {
        // type flag convention: 1-local 2-remote
        final int pending = DataFileStatus.PENDING.ordinal();
        return dfservice.findByFkeyAndStatusGreaterThan(fkey, pending);
    }

    /**
     * Update check: return the first file with this name whose chunks are all
     * finished (status at least CHUNK_FINISHED), or empty otherwise.
     *
     * @param fname file name
     * @return first matching {@link DataFile} at/past CHUNK_FINISHED, or empty
     */
    public Mono<DataFile> getDataFileByName(String fname) {
        // filter() replaces the original flatMap/Mono.empty() dance; the
        // original trailing switchIfEmpty(Mono.empty()) was a no-op and is dropped.
        return dfservice.findByFileName(fname)
                .next()
                .filter(dataFile -> dataFile.getStatus() >= DataFileStatus.CHUNK_FINISHED.ordinal());
    }

    /**
     * Instant-upload ("秒传") check. If the key is already known and uploaded,
     * returns "false" (no upload needed); otherwise registers a new DataFile
     * and returns it serialized (Kryo + Base64) so the client can start
     * uploading chunks against it.
     *
     * @param fkey      file key (content hash); also used to guess the media type
     * @param fsize     file size in bytes
     * @param dir       optional target directory, may be null
     * @param storeType optional numeric store type, may be null
     * @return "false" when the upload can be skipped, else the Base64-encoded
     *         Kryo serialization of the (new or pending) DataFile
     */
    public Mono<String> checkFile(String fkey, Long fsize, String dir, String storeType) {
        return checkFile(fkey).flatMap(dataFile -> {
            // NOTE(review): checkFile(fkey) already filters status > PENDING, so this
            // condition looks always-true and the else branch unreachable — confirm
            // the semantics of findByFkeyAndStatusGreaterThan before relying on it.
            if (dataFile.getStatus() > DataFileStatus.PENDING.ordinal()) {
                return Mono.just("false");
            } else {
                return Mono.just(Base64.encodeBase64String(KryoUtils.serializeObject(dataFile)));
            }
        }).switchIfEmpty(Mono.defer(() -> {
            // Unknown key: create a fresh record; media type guessed from the key's extension.
            MediaType mediaType = MediaTypeFactory.getMediaType(fkey)
                    .orElse(MediaType.APPLICATION_OCTET_STREAM);
            DataFile df = new DataFile(fkey, fsize, mediaType.toString());
            if (dir != null) {
                df.setDir(dir);
            }
            if (storeType != null) {
                // NOTE(review): Integer.valueOf throws NumberFormatException on
                // non-numeric input — validate upstream or handle here.
                df.setType(Integer.valueOf(storeType));
            }
            return createFile(df)
                    .flatMap(dataFile -> {
                        return Mono.just(Base64.encodeBase64String(KryoUtils.serializeObject(dataFile)));
                    });
        }));
    }

    /**
     * Register a new data file and prepare its chunk-cache directory.
     *
     * @param datafile file record to persist
     * @return the stored {@link DataFile}
     */
    public Mono<DataFile> createFile(DataFile datafile) {
        // Chunk cache root for this file: <rootDir><dir><fkey>
        final String chunkRoot = rootDir + DirUtils.getDir(datafile.getDir()) + datafile.getFkey();
        DirUtils.mkDir(chunkRoot);
        // Persist (or refresh) the file record.
        return dfservice.insertOrUpdate(datafile);
    }

    /**
     * Resume check for one chunk: matches only if the chunk already exists with
     * the same index and hash in FINISHED state, so the client can skip it.
     *
     * @param fkey       file key
     * @param chunkindex chunk index within the file
     * @param chunkhash  expected chunk hash
     * @return the finished chunk, or empty when it must be (re-)uploaded
     */
    public Mono<Chunk> checkChunk(String fkey, Long chunkindex, String chunkhash) {
        final int finished = ChunkStatus.FINISHED.ordinal();
        return chkservice.findByFkeyAndIndexAndHashAndStatus(fkey, chunkindex, chunkhash, finished);
    }


    /**
     * Persist one uploaded chunk to the local chunk cache, record its metadata,
     * and re-check whether the owning file is now complete.
     *
     * @param datafile owning file record
     * @param chunk    chunk metadata (index, hash, size)
     * @param filePart uploaded multipart content
     * @return the chunk after verification (see {@link #checkChunkStatus})
     * @throws InvalidChunkException when the chunk cannot be staged
     */
    public Mono<Chunk> putChunk(DataFile datafile,
                                Chunk chunk,
                                FilePart filePart) {
        // Ensure the per-file chunk directory exists before writing into it.
        String chunkdir = DirUtils.getFileDir(rootDir, datafile.getDir(), datafile.getFkey());
        DirUtils.mkDir(chunkdir);
        try {
            // Target path for this chunk index inside the file's cache dir.
            Path chunkfile = DirUtils.getChunkPath(rootDir, datafile.getDir(), datafile.getFkey(), chunk.getIndex());
            return filePart.transferTo(chunkfile)
                    .then(chkservice.insertOrUpdate(chunk)
                            // Verify hash/size and cascade completion checks.
                            .flatMap(chk -> checkChunkStatus(datafile, chk)));
        } catch (Exception e) {
            // BUGFIX: the original rethrow dropped the root cause entirely;
            // InvalidChunkException has no cause constructor here, so log it.
            logger.error("putChunk failed for " + datafile.getFkey() + "#" + chunk.getIndex(), e);
            throw new InvalidChunkException();
        }
    }

    /**
     * Verify one uploaded chunk against its declared hash and size.
     * On success the chunk is tagged with this node, marked FINISHED, saved,
     * and the whole file's completion is re-checked. On failure the corrupt
     * chunk file is deleted and {@link InvalidChunkException} is thrown so the
     * client re-uploads it.
     *
     * @param datafile owning file record
     * @param chunk    chunk metadata just written to disk
     * @return the saved chunk
     * @throws InvalidChunkException when hashing fails, the hash/size mismatch,
     *                               or persisting the chunk fails
     */
    public Mono<Chunk> checkChunkStatus(DataFile datafile, Chunk chunk) {
        Path chunkfile = DirUtils.getChunkPath(rootDir, datafile.getDir(), datafile.getFkey(), chunk.getIndex());
        long start = System.currentTimeMillis();
        boolean isvalid = false;
        try {
            String hash = QETag.calcETag(chunkfile.toString());
            isvalid = hash.equals(chunk.getHash()) &&
                    QETag.valid(chunkfile.toString(), chunk.getSize());
            logger.info(chunk.getIndex() + " : " + chunk.getHash() + "/" + hash + " : " + chunk.getSize());
        } catch (IOException e) {
            // BUGFIX: keep the root cause in the log instead of dropping it silently.
            logger.error("hash check failed for chunk " + chunk.getIndex() + " of " + datafile.getFkey(), e);
            throw new InvalidChunkException();
        }
        logger.debug("QETag.calcETag cost time :" + (System.currentTimeMillis() - start) + "ms");
        if (!isvalid) {
            // Corrupt upload: remove the cached file so the client retries this
            // chunk. (Original threw from a finally block; same net behavior.)
            try {
                Files.deleteIfExists(chunkfile);
            } catch (IOException e) {
                // BUGFIX: was an empty catch — at least record why the file stayed.
                logger.error("could not delete invalid chunk file " + chunkfile, e);
            }
            throw new InvalidChunkException();
        }
        // Record which node holds this chunk (instance id doubles as host address).
        chunk.setChunkAddr(currentNode.getInstanceId());
        chunk.setStatus(ChunkStatus.FINISHED.ordinal());
        return chkservice.save(chunk)
                .retry(checkRetry)
                .flatMap(chk -> {
                    // Was this the last outstanding chunk of the file?
                    return checkFileStatus(datafile, chk.getCount())
                            .flatMap(it -> {
                                if (it) {
                                    //TODO create the merge job here
                                    logger.debug("chunk finished!");
                                }
                                return Mono.just(chk);
                            });
                }).onErrorResume(e -> {
                    // BUGFIX: log the full exception, not just getMessage().
                    logger.error("saving finished chunk failed", e);
                    return Mono.error(new InvalidChunkException());
                });
    }

    /**
     * File completion check: when every expected chunk of {@code datafile} is
     * FINISHED, promote the file to CHUNK_FINISHED and persist it.
     *
     * @param datafile   the file being assembled
     * @param chunkcount expected total number of chunks
     * @return true when the file just reached CHUNK_FINISHED, false otherwise
     */
    public Mono<Boolean> checkFileStatus(DataFile datafile, Long chunkcount) {
        return chkservice.findAllByFkeyAndStatus(datafile.getFkey(), ChunkStatus.FINISHED.ordinal())
                .count()
                .flatMap(finishedCount -> {
                    if (finishedCount != chunkcount.longValue()) {
                        // Some chunks still outstanding.
                        return Mono.just(false);
                    }
                    datafile.setStatus(DataFileStatus.CHUNK_FINISHED.ordinal());
                    //TODO write a binlog entry recording file-write completion
                    return dfservice.save(datafile)
                            .retry(checkRetry)
                            .thenReturn(true);
                });
    }

    /**
     * Schedule a one-shot "checkFileCompleted" job for a file, firing at a
     * random point within the next 0-300 seconds (jitter spreads the merge
     * load across nodes).
     *
     * @param fkey  file key
     * @param count expected chunk count, forwarded to the job
     * @return the scheduled job name
     */
    public Mono<String> addJobForCheckFileCompleted(String fkey, Long count) {
        String jobName = "checkFileCompleted" + SEP + fkey;
        ScheduleJob job = new ScheduleJob();
        job.setJobName(jobName);
        job.setBeanName("fsService");
        job.setMethodName("checkFileCompleted");
        // ThreadLocalRandom replaces Math.round(Math.random()*300): same 0..300
        // inclusive range, without the shared Random contention.
        long jitterMillis = ThreadLocalRandom.current().nextLong(301) * SECOND;
        // NOTE(review): `Date` is imported as java.sql.Date; `Date.from` resolves
        // to the inherited java.util.Date factory — consider importing java.util.Date.
        String cronStr = CronDateUtils.getCron(Date.from(Instant.now().plusMillis(jitterMillis)));
        job.setCronExpression(cronStr);
        // Serialized parameter values, separated by the shared delimiter.
        String str = fkey + PARAMS_SEPARATOR + count;
        job.setParameters(str);
        // Matching parameter types for reflective invocation.
        job.setParameterTypes(fkey.getClass().getTypeName() + PARAMS_SEPARATOR + count.getClass().getTypeName());
        scheduleJobService.add(job);
        return Mono.just(jobName);
    }

    /**
     * 文件同步及合并存储 — sync missing chunks from remote nodes, merge them into
     * the backing store, record the final URL and clean up the chunk cache.
     * FIXME processing note: all chunks must be uploaded and verified first,
     * then written to HDFS in order; chunk upload is concurrent/out-of-order,
     * so the merge runs serially in the background.
     *
     * @param fkey  file key
     * @param count expected chunk count
     */
    public void checkFileCompleted(String fkey, Long count) {
        dfservice.findByFkeyAndStatus(fkey, DataFileStatus.CHUNK_FINISHED.ordinal())
                .subscribe(df -> {
                    FSStore fs = FileSystemFactory
                            .instance()
                            .getFileSystem(DataFileType.values()[df.getType()]);
                    long finalStart = System.currentTimeMillis();
                    if (fs instanceof ChunkStore) {
                        // Pull chunks held only by remote nodes into the local cache.
                        // NOTE(review): nested subscribe() calls lose backpressure and
                        // error propagation — consider flattening into one pipeline.
                        syncService.SyncDiffChunkToLocal(df.getFkey(), df.getDir())
                                .doOnError(e -> {
                                    throw new FileGrpcSyncException(df.getFkey());
                                })
                                .subscribe(sync -> {
                                    logger.debug("Sync DiffChunk To Local count：" + sync);
                                    // Merge the cached chunks into the final file.
                                    ((ChunkStore) fs).mergeChunks(df, count)
                                            .doOnError(e -> {
                                                throw new InvalidChunkException();
                                            })
                                            .retry(checkRetry)
                                            .subscribe(url -> {
                                                // BUGFIX: was `url == ""` — a reference comparison that is
                                                // effectively always false; use isEmpty() to detect failure.
                                                if (url == null || url.isEmpty()) {
                                                    //TODO multi-node chunk sync failed -> merge failed
                                                    logger.error("miss some chunk,can't merge chunks");
                                                } else {
                                                    logger.debug("mergeChunks cost time :" + (System.currentTimeMillis() - finalStart) + "ms");
                                                    if (fs instanceof LocalFileStore) {
                                                        url = currentNode.getInstanceId() + "://" + url;
                                                        df.setUrl(url);
                                                        df.setType(DataFileType.LOCAL.ordinal());
                                                    } else if (fs instanceof DistributedFileStore) {
                                                        //TODO mode 1: multi-replica chunk backup
                                                        df.setUrl(url);
                                                        //TODO backup record table; key written to mirroUrl
                                                        df.setMirroUrl(null);
                                                        df.setType(DataFileType.DFS.ordinal());
                                                    } else {
                                                        //TODO mode 2: one local copy, one remote copy
                                                        df.setUrl(url);
                                                    }
                                                    //TODO write a binlog entry recording sync completion
                                                    df.setUpdateTime(LocalDateTime.now());
                                                    df.setStatus(DataFileStatus.RSYNC_FINISHED.ordinal());
                                                    dfservice.save(df)
                                                            .subscribe(it -> {
                                                                // Drop the scheduled re-check job for this file.
                                                                String jobName = "checkFileCompleted" + SEP + fkey;
                                                                scheduleJobService.deleteAllByName(jobName);
                                                                logger.debug("write fs complete : " + it.getUrl());
                                                                // Best-effort removal of the cached chunk files.
                                                                cleanChunks(df);
                                                            });
                                                }
                                            });
                                });
                    }
                });
    }

    /**
     * Best-effort removal of the cached chunk files of a merged data file.
     * If some chunks cannot be deleted yet, a retry job is scheduled to run
     * again in ~60s; once the cache is empty, any pending retry job is removed.
     *
     * @param dataFile the merged file whose chunk cache should be purged
     */
    public void cleanChunks(DataFile dataFile) {
        String chunkdir = DirUtils.getFileDir(rootDir, dataFile.getDir(), dataFile.getFkey());
        Path dir = Paths.get(chunkdir);
        if (!Files.exists(dir)) {
            return;
        }
        String jobName = "cleanChunks" + SEP + dataFile.getFkey();
        try {
            List<Path> chunks;
            // BUGFIX: Files.list returns a stream that must be closed, otherwise
            // the directory handle leaks — wrap in try-with-resources.
            try (Stream<Path> listing = Files.list(dir)) {
                chunks = listing.collect(Collectors.toList());
            }
            int total = chunks.size();
            for (Path path : chunks) {
                try {
                    if (Files.isWritable(path)) {
                        Files.delete(path);
                    }
                } catch (IOException ignored) {
                    // best-effort: leftovers are handled by the retry job below
                }
            }
            long unremove;
            try (Stream<Path> remaining = Files.list(dir)) {
                unremove = remaining.count();
            }
            if (unremove > 0) {
                // Some chunks survived: schedule a retry in ~60s, passing the
                // DataFile serialized (Kryo + Base64) as the job parameter.
                ScheduleJob job = new ScheduleJob();
                job.setJobName(jobName);
                job.setBeanName("fsService");
                job.setMethodName("cleanChunks");
                job.setCronExpression(CronDateUtils.getCron(Date.from(Instant.now().plusMillis(60 * SECOND))));
                job.setParameters(Base64.encodeBase64String(KryoUtils.serializeObject(dataFile)));
                job.setParameterTypes(dataFile.getClass().getTypeName());
                scheduleJobService.add(job);
            } else {
                // BUGFIX: the original deleted the job unconditionally right after
                // adding it, which removed the retry it had just scheduled. Only
                // drop pending retry jobs once the cache is actually empty.
                scheduleJobService.deleteAllByName(jobName);
            }
            logger.info("remove chunks :" + (total - unremove));
        } catch (IOException e) {
            logger.error("find chunks error:" + e, e);
        }
    }
}
