package cn.echoparrot.domain.service;


import cn.echoparrot.domain.entity.DataFile;
import cn.echoparrot.domain.entity.Dataset;
import cn.echoparrot.domain.entity.Segment;
import cn.echoparrot.repository.neo4j.DataFileMapper;
import cn.echoparrot.repository.neo4j.DatasetMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.ai.document.Document;
import org.springframework.ai.document.DocumentReader;
import org.springframework.ai.embedding.BatchingStrategy;
import org.springframework.ai.embedding.EmbeddingModel;
import org.springframework.ai.embedding.EmbeddingOptionsBuilder;
import org.springframework.ai.reader.pdf.PagePdfDocumentReader;
import org.springframework.ai.reader.tika.TikaDocumentReader;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.FileSystemResource;
import org.springframework.stereotype.Service;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import org.w3c.dom.Element;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * 文件read - split - write 是一个长时间完成的任务，需要单独的线程或线程池处理。这些线程或线程池的创建是高成本的。
 * 一些用于分析文件的依赖库也可能涉及到一些需要open & close的资源。
 * 这些资源都由此分析器class常态管理。
 * @author Able
 */
@Service
public class Pipeline {

	private static final Logger logger = LoggerFactory.getLogger(Pipeline.class);

	// Token-based splitter; args appear to be (chunkSize, minChunkSizeChars,
	// minChunkLengthToEmbed, maxNumChunks, keepSeparator) — TODO confirm against
	// the TokenTextSplitter constructor of the Spring AI version in use.
	private final TokenTextSplitter tokenTextSplitter = new TokenTextSplitter(300, 50, 3, 500, true);

	private final DatasetService datasetService;

	// Cached per-batch result streams keyed by batch id (creation timestamp).
	// Entries are removed when a batch terminates so the map does not grow unbounded.
	private final Map<Long, Flux<Result>> batches = new ConcurrentHashMap<>();

	/**
	 * Outcome of processing a single source file.
	 *
	 * @param name file name (no directory part)
	 * @param success whether the file was read, split, embedded and stored
	 */
	public record Result(String name, boolean success) {
	}

	private final DatasetMapper datasetMapper;
	private final DataFileMapper dataFileMapper;
	private final BatchingStrategy batchingStrategy = new DocumentCountBatchStrategy();
	private final String dashScopeApiKey;

	/**
	 * @param dashScopeApiKey API key for the OCR fallback reader, resolved from the
	 *        {@code dashscope.api.key} property. The {@code ${...}} placeholder syntax
	 *        is required — without it Spring injects the literal string instead of
	 *        the configured property value.
	 */
	@Autowired
	public Pipeline(DatasetService datasetService, DatasetMapper datasetMapper, DataFileMapper dataFileMapper,
			@Value("${dashscope.api.key}") String dashScopeApiKey) {
		this.datasetService = datasetService;
		this.datasetMapper = datasetMapper;
		this.dataFileMapper = dataFileMapper;
		this.dashScopeApiKey = dashScopeApiKey;
	}

	/**
	 * Batch document ETL: read each file -> split into segments -> embed -> persist,
	 * building the Dataset (knowledge base) -> DataFile -> Segment hierarchy.
	 * The resulting {@link Flux} is cached for 10 minutes and registered in
	 * {@link #batches} so an SSE endpoint can observe progress via {@link #findBatch(Long)}.
	 *
	 * @param datasetId id of an existing dataset; must be &gt;= 0
	 * @param embeddingModel model used to embed the split segments
	 * @param paths source files to process
	 * @return the batch id (creation timestamp in millis) for progress lookup
	 * @throws IllegalArgumentException if the dataset does not exist
	 */
	public long process(Long datasetId, EmbeddingModel embeddingModel, List<Path> paths) {
		Assert.state(datasetId >= 0, "datasetId不能小于0");
		Dataset dataset = datasetMapper.findById(datasetId)
				.orElseThrow(() -> new IllegalArgumentException("datasetId不存在"));
		// Virtual threads: per-file processing is long-running, mostly blocking I/O.
		ExecutorService executor = Executors.newVirtualThreadPerTaskExecutor();
		Flux<Result> flux = Flux.fromIterable(paths)
				.publishOn(Schedulers.fromExecutor(executor))
				.doOnNext(path -> logger.info("开始处理：{}", path))
				.doOnComplete(() -> logger.info("处理完成"))
				.map(path -> processFile(datasetId, dataset, embeddingModel, path))
				// Release the per-batch executor once the upstream terminates (success or error).
				.doFinally(signal -> executor.shutdown())
				.cache(Duration.of(10, ChronoUnit.MINUTES));
		// NOTE(review): two batches started within the same millisecond would collide on
		// this id — consider a dedicated id generator if concurrent starts are realistic.
		Long batch = System.currentTimeMillis();
		batches.put(batch, flux);
		// This first subscription drives production; SSE consumers later subscribe to the
		// cached flux. Production is deliberately NOT triggered by the SSE GET itself,
		// because GET is expected to be idempotent.
		flux.subscribe(result -> {
			logger.info("完成处理成功 {}：{}", result.success(), result.name());
		}, throwable -> {
			// Log the full stack trace, not just the message.
			logger.error("处理出错", throwable);
			// Drop the batch on failure so the map does not leak.
			batches.remove(batch);
		}, () -> {
			logger.info("处理完成");
			// TODO delay removal for a grace period so a late SSE subscriber can still
			// observe the finished batch's progress.
			batches.remove(batch);
		});
		return batch;
	}

	/**
	 * Runs the full ETL for one file: read (with OCR fallback for text-less PDFs),
	 * split, store segments locally and in the graph store, embed and index.
	 * Per-file failures are reported via the returned {@link Result}, never thrown.
	 */
	private Result processFile(Long datasetId, Dataset dataset, EmbeddingModel embeddingModel, Path path) {
		Optional<DocumentReader> reader = reader(path);
		if (reader.isEmpty()) {
			logger.info("不支持此文件类型：{}", path);
			return new Result(path.getFileName().toString(), false);
		}

		List<Document> doclist = reader.get().read();
		// Scanned PDFs often yield no extractable text; retry with OCR.
		if (doclist.isEmpty() && path.toString().endsWith(".pdf")) {
			doclist = new QwenOcrReader(path, dashScopeApiKey).read();
		}
		if (doclist.isEmpty()) {
			logger.info("文件没有可以向量化的内容：{}", path);
			return new Result(path.getFileName().toString(), false);
		}

		doclist = tokenTextSplitter.split(doclist);
		logger.info("读取了 {} 个文档", doclist.size());
		storeInLocal(datasetId, path, doclist);

		List<Segment> segments = doclist.stream().map(doc -> new Segment(doc.getText())).toList();
		DataFile dataFile = new DataFile(path.getFileName().toString(), dataset, segments);
		DataFile savedFile = dataFileMapper.save(dataFile);
		logger.info("已保存文件：{}", savedFile);
		logger.info("文档片段已保存至数据库");
		List<float[]> embeddings = embeddingModel.embed(
				segments.stream().map(segment -> new Document(segment.text())).toList(),
				EmbeddingOptionsBuilder.builder().build(), this.batchingStrategy);
		// Pair segments and embeddings by position. The previous indexOf() approach was
		// O(n^2) and, worse, assigned the FIRST matching segment's embedding to every
		// duplicate-text segment.
		for (int i = 0; i < segments.size(); i++) {
			segments.get(i).embedding(embeddings.get(i));
		}
		dataFileMapper.saveEmbedding(savedFile);
		logger.info("已添加至向量化的索引");
		return new Result(path.getFileName().toString(), true);
	}

	/**
	 * Persists the split segments of one source file to the local file system as XML
	 * (via the JDK DOM/Transformer APIs):
	 *
	 * <pre>{@code
	 * <file name="" size="">
	 *   <segment>...</segment>
	 * </file>
	 * }</pre>
	 *
	 * The file is written into a {@code segments} directory that is a sibling of the
	 * source file's parent directory.
	 * NOTE(review): {@code datasetId} is unused here and the target path does not
	 * include it — confirm whether a {datasets}/{datasetId}/segments layout was intended.
	 *
	 * @param datasetId dataset the file belongs to (currently unused)
	 * @param path source file whose segments are stored
	 * @param doclist the split segments
	 */
	private void storeInLocal(Long datasetId, Path path, List<Document> doclist) {
		String filename = path.getFileName().toString() + ".xml";
		Path directory = path.getParent().resolveSibling("segments");
		Path xmlFile = directory.resolve(filename);
		try {
			Files.createDirectories(directory);

			// Build the DOM document.
			DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
			org.w3c.dom.Document xmlDoc = builder.newDocument();

			// Root element carries the original file name and the segment count.
			Element fileElement = xmlDoc.createElement("file");
			fileElement.setAttribute("name", path.getFileName().toString());
			fileElement.setAttribute("size", String.valueOf(doclist.size()));
			xmlDoc.appendChild(fileElement);

			// One <segment> element per split document.
			for (Document doc : doclist) {
				Element segmentElement = xmlDoc.createElement("segment");
				segmentElement.setTextContent(doc.getText());
				fileElement.appendChild(segmentElement);
			}

			// Serialize the DOM to the target file.
			Transformer transformer = TransformerFactory.newInstance().newTransformer();
			transformer.transform(new DOMSource(xmlDoc), new StreamResult(xmlFile.toFile()));
			logger.info("已保存XML文件: {}", xmlFile);
		} catch (IOException e) {
			// Best effort: the local XML copy is not critical, so log (with stack trace)
			// and continue the pipeline.
			logger.error("保存XML文件失败", e);
		} catch (ParserConfigurationException | TransformerException e) {
			// TransformerConfigurationException is a subclass of TransformerException,
			// so a single multi-catch covers both; keep the cause and add context.
			throw new IllegalStateException("写入XML失败: " + xmlFile, e);
		}
	}

	/**
	 * Stores the split segments of one source file as an HTML document (one {@code <p>}
	 * per segment, titled after the file) in a "segments" directory that is a sibling
	 * of the source file's parent directory. Failures are logged and swallowed.
	 *
	 * @param datasetId dataset the file belongs to (not part of the target path)
	 * @param path source file whose segments are stored
	 * @param doclist the split segments
	 */
	private void storeInLocalHtml(String datasetId, Path path, List<Document> doclist) {
		Path directory = path.getParent().resolveSibling("segments");
		Path htmlFile = directory.resolve(path.getFileName().toString() + ".html");
		try {
			Files.createDirectories(directory);

			// Build an HTML document titled after the source file.
			org.jsoup.nodes.Document htmlDoc = new org.jsoup.nodes.Document("");
			htmlDoc.title(path.getFileName().toString());

			// One paragraph per split segment.
			doclist.forEach(doc -> htmlDoc.body().appendElement("p").text(doc.getText()));

			// Serialize and write.
			Files.writeString(htmlFile, htmlDoc.html());
			logger.info("已保存XML文件: {}", htmlFile);
		} catch (IOException e) {
			logger.error("保存XML文件失败: {}", e.getMessage());
		}
	}



	/**
	 * Chooses a {@link DocumentReader} for the given file based on its extension
	 * (matched case-insensitively). This is the Extract step of the document ETL:
	 * DocumentReader (E) -&gt; TextSplitter (T) -&gt; vector store (L). Each source
	 * file is independent, so selection happens per file.
	 *
	 * @param path source file
	 * @return a reader, or {@link Optional#empty()} when the format is unsupported
	 */
	private Optional<DocumentReader> reader(Path path) {

		final String fileType = StringUtils.getFilenameExtension(path.toString());
		// hasLength replaces the deprecated StringUtils.isEmpty with identical semantics.
		if (!StringUtils.hasLength(fileType)) {
			logger.info("文件格式未知：{}", path);
			return Optional.empty();
		}

		// Lowercase with a fixed locale so "PDF", "Jpg" etc. are recognized too.
		switch (fileType.toLowerCase(Locale.ROOT)) {
		case "png":
		case "gif":
		case "jpg":
		case "jpeg":
			// Images are handled via the OCR reader.
			return Optional.of(new QwenOcrReader(path, dashScopeApiKey));

		case "html":
			// HTML is not supported yet.
			return Optional.empty();
		case "pdf":
			return Optional.of(new PagePdfDocumentReader(new FileSystemResource(path)));
		case "txt":
		case "md":
		case "docx":
		case "doc":
			return Optional.of(new TikaDocumentReader(new FileSystemResource(path)));

		case "xlsx":
		case "xls":
			// Spreadsheets are not supported yet.
			return Optional.empty();
		default:
			logger.info("暂不支持这个文件格式：{}", path);
			return Optional.empty();
		}
	}

	/**
	 * Looks up the cached result stream of a running or recently finished batch,
	 * typically to feed an SSE endpoint with per-file progress.
	 * NOTE(review): returns {@code null} when the batch id is unknown or the entry has
	 * already been removed on completion/error — callers must handle that; consider
	 * returning {@code Optional} or {@code Flux.empty()} instead (verify callers first).
	 *
	 * @param batch batch id returned by {@link #process}
	 * @return the cached result flux, or {@code null} if no such batch is registered
	 */
	public Flux<Result> findBatch(Long batch){
		return batches.get(batch);
	}
}
