package org.groupg.mockdatabyspring.morkdata.service;

import cn.hutool.core.date.TimeInterval;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.T;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.groupg.mockdatabyspring.morkdata.model.DBDataFile;
import org.groupg.mockdatabyspring.morkdata.model.DBDataFiles;
import org.groupg.mockdatabyspring.morkdata.model.DBInfo.ColumnLink;
import org.groupg.mockdatabyspring.morkdata.model.DBInfo.DBColumnInfo;
import org.groupg.mockdatabyspring.morkdata.model.enums.DBDataFileStatus;
import org.groupg.mockdatabyspring.morkdata.model.enums.LinkType;
import org.groupg.mockdatabyspring.morkdata.model.enums.MarkDataConfig;
import org.groupg.mockdatabyspring.morkdata.service.runnables.DataFileConsumer;
import org.groupg.mockdatabyspring.morkdata.service.runnables.DataFileProducer;
import org.groupg.mockdatabyspring.morkdata.utils.FileReaderUtils;
import org.groupg.mockdatabyspring.morkdata.utils.GraphUtils;
import org.groupg.mockdatabyspring.system.exceptions.CheckException;
import org.springframework.stereotype.Service;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.*;

import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.in;

/**
 * 默认请求处理的服务 — default service driving mock-data file creation:
 * builds the column-relation graph, generates data files (single-threaded or
 * via a producer/consumer thread pool) and creates a Lucene index per file.
 */
@Service
public class DefaultCreateService {

    static final Log log = LogFactory.get();

    /**
     * 注入的图数据库工具 — utility producing the in-memory TinkerPop graph
     * used to model relations between table columns.
     */
    private final GraphUtils graphUtils = new GraphUtils();

    /**
     * 根据参数完成多数据表的创建与构造。
     * Builds a directed graph of column links, generates every data file that
     * does not participate in any link, and indexes each generated file.
     * Linked-file generation along the graph is not implemented yet.
     *
     * @param dbDataFiles 多张数据表的实例对象 (table definitions plus their column links)
     */
    public void executeDataFile(DBDataFiles dbDataFiles) {
        Set<DBDataFile> noGraphDbDataFiles = new HashSet<>();
        Set<DBDataFile> graphDBDataFiles = new HashSet<>();
        List<ColumnLink> columnLinkList = dbDataFiles.getColumnLinkList();
        // Graph holding one vertex per linked column, one directed edge per link.
        Graph graph = graphUtils.getNewGraph();
        for (ColumnLink columnLink : columnLinkList) {
            for (DBColumnInfo dbColumnInfo : columnLink.getLeftColumn()) {
                GraphTraversal<Vertex, Vertex> leftTraversal = graph.traversal().V().hasValue(dbColumnInfo);
                GraphTraversal<Vertex, Vertex> rightTraversal = graph.traversal().V().hasValue(columnLink.getRightColumn());
                // Reuse an existing vertex when one matches, otherwise create it.
                // A GraphTraversal is never null, so emptiness must be tested with
                // hasNext(); calling next() on an empty traversal throws.
                Vertex leftV = leftTraversal.hasNext()
                        ? leftTraversal.next()
                        : graph.addVertex(T.label, "column", "Node", dbColumnInfo);
                Vertex rightV = rightTraversal.hasNext()
                        ? rightTraversal.next()
                        : graph.addVertex(T.label, "column", "Node", columnLink.getRightColumn());
                // Directed edge: left column -> right column, labelled with the link type.
                leftV.addEdge(columnLink.getLinkType(), rightV);
            }
        }

        // Classify each file exactly once: it is graph-bound when it owns a column
        // of ANY link. (Classifying inside the per-link loop, as before, could put
        // the same file into both sets and generate it twice; an empty link list
        // would classify nothing at all.)
        for (DBDataFile dbDataFile : dbDataFiles.getDbTableInfoList()) {
            boolean linked = false;
            for (ColumnLink columnLink : columnLinkList) {
                if (dbDataFile.getDbTableInfo().hasColumn(columnLink.getLeftColumn())
                        || dbDataFile.getDbTableInfo().hasColumn(columnLink.getRightColumn())) {
                    linked = true;
                    break;
                }
            }
            if (linked) {
                graphDBDataFiles.add(dbDataFile);
            } else {
                noGraphDbDataFiles.add(dbDataFile);
            }
        }

        // Generate and index every file that is independent of the relation graph.
        for (DBDataFile dbDataFile : noGraphDbDataFiles) {
            try {
                dbDataFile.execute();
                indexDBDataFile(dbDataFile);
            } catch (IOException | CheckException e) {
                log.error(e, e.getMessage());
            }
        }

        // Memory budget for linked generation: rows kept in memory shrink as the
        // total column count across linked tables grows.
        int graphColSumNum = 10;
        for (DBDataFile dbDataFile : graphDBDataFiles) {
            graphColSumNum += dbDataFile.getDbTableInfo().getColumnInfos().size();
        }
        int allTableRowNum = 100000 / graphColSumNum; // TODO(review): computed but not yet consumed below

        // Walk the graph from each root vertex (no incoming edges) along SUM/SAME
        // links. The original filter V().inE().count().is(0) reduced the WHOLE
        // traversal to one Long; where(...) applies the predicate per vertex.
        graph.traversal()
                .V()
                .where(in().count().is(0))
                .forEachRemaining(vertex -> {
                    graph.traversal().V(vertex)
                            .repeat(in(LinkType.SUM, LinkType.SAME))
                            .until(in(LinkType.SUM, LinkType.SAME).count().is(0))
                            .path().forEachRemaining(v -> {
                                // TODO: generate linked column data along this path.
                            });
                });
    }

    /**
     * 针对完成数据文件组成的数据文件处理索引文件，索引文件的处理来源于每个分隔符以及数据字典的文件处理。
     * Creates a Lucene index for a finished data file in a sibling directory
     * named {@code <tableName>Index}; one document per data row, one stored
     * string field per column plus the row number.
     *
     * @param dbDataFile 数据文件内容 (the generated data file to index)
     * @throws CheckException 参数检查用异常 — thrown when the file generation has not finished
     * @throws IOException    index directory or writer I/O failure
     */
    private void indexDBDataFile(DBDataFile dbDataFile) throws CheckException, IOException {
        if (!DBDataFileStatus.OVER.equals(dbDataFile.getStatus())) {
            throw new CheckException("运行并未完成，请等待运行完成后再进行数据索引的创建");
        }
        File dataFile = new File(dbDataFile.getAbsoluteFilePath());
        // Index directory lives next to the data file. Using the (parent, child)
        // constructor — the previous string concatenation dropped the path separator.
        File indexDir = new File(dataFile.getParentFile().getAbsoluteFile(),
                dbDataFile.getDbTableInfo().getTableName() + "Index");
        // Quiet delete: on the first run the directory does not exist yet.
        FileUtils.deleteQuietly(indexDir);
        FileUtils.forceMkdir(indexDir);

        try (Directory directory = FSDirectory.open(indexDir.toPath())) {
            IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
            iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
            // try-with-resources: the writer must be closed (and thereby committed)
            // even when indexing a row fails part-way through.
            try (IndexWriter writer = new IndexWriter(directory, iwc)) {
                int columnCount = dbDataFile.getDbTableInfo().getColumnInfos().size();
                List<DBColumnInfo> dbColumnInfos = dbDataFile.getDbTableInfo().getColumnInfos().stream().toList();
                final int[] row = {0};
                // Read the DATA file — the original mistakenly read the freshly
                // created (empty) index directory.
                // NOTE(review): if the file was written with a title row, that row is
                // indexed as data too — confirm whether it should be skipped.
                FileReaderUtils.lines(dataFile, Charset.forName(dbDataFile.getCharsetName()),
                        dbDataFile.getFieldSplit(), dbDataFile.getLineSplit()).forEach(fields -> {
                    Document doc = new Document();
                    doc.add(new IntField("row", row[0], Field.Store.YES));
                    for (int i = 0; i < columnCount; i++) {
                        DBColumnInfo columnInfo = dbColumnInfos.get(i);
                        doc.add(new StringField(columnInfo.getColumnName(), fields[i], Field.Store.YES));
                    }
                    try {
                        writer.addDocument(doc); // best-effort: a bad row is logged, not fatal
                    } catch (IOException e) {
                        log.error(e, e.getMessage());
                    }
                    row[0]++;
                });
            }
        }
    }

    /**
     * 基于普通内容的处理 — single-threaded generation, appending rows batch by batch.
     *
     * @param dbDataFile 文件配置 (file configuration / row generator)
     * @param file       文件位置 (target file)
     * @param m          一次要处理多少行 (rows per batch)
     * @param x          要处理多少次 (number of full batches; one extra partial batch follows)
     * @throws IOException 文件读写异常
     */
    public void defaultCreateFile(DBDataFile dbDataFile, File file, int m, int x) throws IOException {
        if (x > 1) {
            log.debug("文件内容较多，需要按每次{}分解。", (x + 1));
            for (int i = 0; i < x + 1; i++) {
                // Last pass writes only the remainder (may be 0 rows — a no-op).
                // Using a local instead of reassigning the parameter.
                int batch = (i == x) ? dbDataFile.getMaxRowNum() % m : m;
                dbDataFile.appendLines(file, batch);
                log.debug("分解第{}/{}次", i + 1, (x + 1));
            }
        } else {
            log.debug("文件内容较少，一次性写入。");
            dbDataFile.appendLines(file);
        }
    }

    /**
     * 基于线程池的处理 — producer/consumer generation: producers create temp part
     * files, consumers fill them, then the parts are concatenated into the
     * target file and deleted.
     *
     * @param dbDataFile 文件配置 (file configuration / row generator)
     * @param file       文件位置 (target file)
     * @param m          一次要处理多少行 (rows per batch)
     * @param x          要处理多少次 (number of full batches; one extra partial batch follows)
     * @throws IOException          文件读写异常
     * @throws ExecutionException   文件处理异常 (a producer task failed)
     * @throws InterruptedException 线程中断异常
     */
    public void defaultCreateFilePool(DBDataFile dbDataFile, File file, int m, int x)
            throws ExecutionException, InterruptedException, IOException {

        TimeInterval interval = new TimeInterval();
        interval.start();
        CountDownLatch latch = new CountDownLatch(x + 1);
        CountDownLatch latchConsumer = new CountDownLatch(x + 1);
        List<Future<File>> futures = new ArrayList<>();
        BlockingQueue<File> taskQueue = new LinkedBlockingQueue<>(20);
        ExecutorService executorProducer = Executors.newFixedThreadPool(5);
        ExecutorService executorConsumer = Executors.newFixedThreadPool(5);
        try {
            for (int i = 0; i < x + 1; i++) {
                futures.add(executorProducer.submit(new DataFileProducer(taskQueue, i, file, latch)));
            }
            for (int i = 0; i < x + 1; i++) {
                if (i == x) {
                    // Last consumer handles only the remainder rows.
                    m = dbDataFile.getMaxRowNum() % m;
                }
                executorConsumer.submit(new DataFileConsumer(taskQueue, m, dbDataFile, latchConsumer, i, x + 1));
            }
            latch.await();
            latchConsumer.await();
        } finally {
            // The pools were leaked before: always release their threads.
            executorProducer.shutdown();
            executorConsumer.shutdown();
        }

        // Concatenate the part files into the target, then remove them. The
        // previous call used the (File, Collection, lineEnding, append) overload,
        // so the charset name was silently used as a LINE ENDING and the platform
        // charset was used for writing; the (File, encoding, Collection, append)
        // overload is the correct one. The unused, never-closed InputStream list
        // is gone as well.
        // NOTE(review): parts are read as UTF-8, as in the original — confirm the
        // consumers really write UTF-8.
        File target = new File(dbDataFile.getAbsoluteFilePath());
        for (Future<File> future : futures) {
            File part = future.get();
            FileUtils.writeLines(target, dbDataFile.getCharsetName(),
                    IOUtils.readLines(Files.newInputStream(part.toPath()), StandardCharsets.UTF_8), true);
            FileUtils.delete(part);
        }
        log.debug("线程池写入完成，耗时{}ms", interval.interval());
    }

    /**
     * 创建文件的总方法 — entry point: prepares the target file, writes the optional
     * title row, then dispatches to the pooled or the plain generator depending
     * on the requested row count.
     *
     * @param dbDataFile 文件处理实例 (file configuration / row generator)
     * @throws IOException          文件读写异常
     * @throws ExecutionException   文件处理异常
     * @throws InterruptedException 线程中断异常
     */
    public void createFile(DBDataFile dbDataFile) throws IOException, ExecutionException, InterruptedException {
        dbDataFile.setStatus(DBDataFileStatus.RUNNING);
        log.debug("启动创建文件");
        File file = FileUtils.getFile(dbDataFile.getAbsoluteFilePath());

        // Ensure the parent directory exists (no-op when already present,
        // IOException when it cannot be created — previously the mkdirs()
        // result was ignored).
        FileUtils.forceMkdir(file.getParentFile());
        if (file.isFile()) {
            // Recreate the file so content of a previous run never leaks in.
            // FileUtils.delete throws instead of silently failing like File.delete().
            FileUtils.delete(file);
            Files.createFile(file.toPath());
        }
        if (dbDataFile.getIsHaveTitle()) {
            FileUtils.write(file,
                    dbDataFile.getDbTableInfo().getTitleString(dbDataFile.getFieldSplit()) + dbDataFile.getLineSplit(),
                    Charset.forName(dbDataFile.getCharsetName()));
        }

        int maxRam = 10000; // 字段数与行数的积 一次写文件内存中保留的内容 (cells kept in memory per write)
        // Guard: a table with more than maxRam columns would make m == 0 and the
        // next division throw ArithmeticException.
        int m = Math.max(1, maxRam / dbDataFile.getDbTableInfo().getColumnInfos().size()); // rows per batch
        int x = dbDataFile.getMaxRowNum() / m; // number of full batches

        if (dbDataFile.getMaxRowNum() > MarkDataConfig.ROWNUMMAX) {
            this.defaultCreateFilePool(dbDataFile, file, m, x);
        } else {
            this.defaultCreateFile(dbDataFile, file, m, x);
        }
        log.debug("完成文件创建，文件路径为：{}", dbDataFile.getAbsoluteFilePath());
        dbDataFile.setStatus(DBDataFileStatus.OVER);
    }
}
