package com.cfp4cloud.cfp.knowledge.support.handler.rag.strategy.impl;

import cn.hutool.core.util.StrUtil;
import com.cfp4cloud.cfp.common.core.constant.enums.YesNoEnum;
import com.cfp4cloud.cfp.knowledge.entity.AiDatasetEntity;
import com.cfp4cloud.cfp.knowledge.entity.AiDocumentEntity;
import com.cfp4cloud.cfp.knowledge.entity.AiSliceEntity;
import com.cfp4cloud.cfp.knowledge.service.EmbeddingStoreService;
import com.cfp4cloud.cfp.knowledge.support.constant.DocumentTypeEnums;
import com.cfp4cloud.cfp.knowledge.support.constant.SliceStatusEnums;
import com.cfp4cloud.cfp.knowledge.support.constant.SourceTypeEnums;
import com.cfp4cloud.cfp.knowledge.support.handler.rag.strategy.EmbeddingStrategy;
import com.cfp4cloud.cfp.knowledge.support.provider.ModelProvider;
import dev.langchain4j.data.document.Metadata;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.DimensionAwareEmbeddingModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.store.embedding.EmbeddingStore;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.Map;

/**
 * Default embedding strategy implementation.
 * <p>
 * Suitable for standard vector databases such as Milvus, Qdrant and Chroma. Embeds slices
 * one by one, supports summary enrichment and flexible metadata configuration.
 * </p>
 *
 * @author pig
 * @date 2024-03-14
 */
@Slf4j
@Component
@RequiredArgsConstructor
public class DefaultEmbeddingStrategy implements EmbeddingStrategy {

	/** Maximum number of summary characters appended to the embedding content. */
	private static final int SUMMARY_MAX_LENGTH = 200;

	private final ModelProvider modelProvider;

	private final EmbeddingStoreService embeddingStoreService;

	/**
	 * This default strategy handles every vector store except Neo4j-backed ones,
	 * which require a dedicated graph-aware strategy.
	 */
	@Override
	public boolean supports(EmbeddingStore<TextSegment> embeddingStore) {
		// Matching on the class simple name avoids a hard dependency on the Neo4j store class
		return !embeddingStore.getClass().getSimpleName().contains("Neo4j");
	}

	/**
	 * Embeds all slices of a document one by one.
	 * <p>
	 * Failures are isolated per slice: a slice that throws is marked FAILED and
	 * processing continues with the remaining slices. Slice status and vector ids
	 * are mutated on the passed-in entities; persistence is the caller's concern.
	 * @param documentEntity the document the slices belong to
	 * @param aiDataset dataset configuration (embedding model, collection, summary flag)
	 * @param sliceEntityList slices to embed; statuses are updated in place
	 * @param embeddingStore target vector store
	 */
	@Override
	public void processEmbedding(AiDocumentEntity documentEntity, AiDatasetEntity aiDataset,
			List<AiSliceEntity> sliceEntityList, EmbeddingStore<TextSegment> embeddingStore) {

		log.debug("使用默认策略处理文档向量化: {}", documentEntity.getName());

		// Resolve the embedding model configured on the dataset
		DimensionAwareEmbeddingModel embeddingModel = modelProvider.getEmbeddingModel(aiDataset.getEmbeddingModel());

		int processedCount = 0;
		int failedCount = 0;

		// Process each slice independently so one failure does not abort the document
		for (AiSliceEntity slice : sliceEntityList) {
			try {
				if (processSlice(slice, documentEntity, aiDataset, embeddingModel, embeddingStore)) {
					processedCount++;
				}
				else {
					failedCount++;
				}
			}
			catch (Exception e) {
				log.warn("切片 {} 向量化失败: {}", slice.getName(), e.getMessage(), e);
				slice.setSliceStatus(SliceStatusEnums.FAILED.getStatus());
				failedCount++;
			}
		}

		// Surface partial failures at WARN so they are visible without debug logging
		if (failedCount > 0) {
			log.warn("默认策略: 成功处理 {} 个切片，失败 {} 个", processedCount, failedCount);
		}
		else {
			log.debug("默认策略: 成功处理 {} 个切片，失败 {} 个", processedCount, failedCount);
		}
	}

	/**
	 * Embeds a single slice: validates content, removes any stale vector, builds the
	 * embedding text, stores the new vector and updates the slice status/vector id.
	 * @param slice slice entity (mutated: status and vector id)
	 * @param documentEntity owning document
	 * @param aiDataset dataset configuration
	 * @param embeddingModel embedding model to use
	 * @param embeddingStore vector store to write to
	 * @return {@code true} if the slice was embedded and stored successfully
	 */
	private boolean processSlice(AiSliceEntity slice, AiDocumentEntity documentEntity, AiDatasetEntity aiDataset,
			DimensionAwareEmbeddingModel embeddingModel, EmbeddingStore<TextSegment> embeddingStore) {

		// An empty slice cannot be embedded; mark it failed and skip
		if (StrUtil.isBlank(slice.getContent())) {
			log.warn("切片内容为空，跳过向量化: {}", slice.getId());
			slice.setSliceStatus(SliceStatusEnums.FAILED.getStatus());
			return false;
		}

		// Re-embedding: drop the previously stored vector first to avoid duplicates
		if (StrUtil.isNotBlank(slice.getQdrantId())) {
			removeExistingEmbedding(aiDataset, slice);
		}

		// Compose the text that will actually be embedded
		String embeddingContent = buildEmbeddingContent(slice, documentEntity, aiDataset);

		// Embed and store; a blank id means the store rejected the entry
		String vectorId = performEmbedding(embeddingContent, slice, documentEntity, embeddingModel, embeddingStore);

		if (StrUtil.isNotBlank(vectorId)) {
			slice.setSliceStatus(SliceStatusEnums.SLICED.getStatus());
			slice.setQdrantId(vectorId);
			return true;
		}
		else {
			slice.setSliceStatus(SliceStatusEnums.FAILED.getStatus());
			return false;
		}
	}

	/**
	 * Builds the text to embed: document name + slice content, optionally followed
	 * by a truncated document summary when the dataset enables pre-summary.
	 */
	private String buildEmbeddingContent(AiSliceEntity slice, AiDocumentEntity documentEntity,
			AiDatasetEntity aiDataset) {
		// Base content: document name + slice content, newline-separated
		String content = String.format("%s\n%s", documentEntity.getName(), slice.getContent());

		// Append summary info when pre-summary is enabled and applicable
		if (shouldIncludeSummary(aiDataset, documentEntity)) {
			// NOTE(review): subSufByLength takes the LAST 200 chars of the summary;
			// if the leading part was intended, this should be StrUtil.subPre — confirm.
			String summary = StrUtil.subSufByLength(documentEntity.getSummary(), SUMMARY_MAX_LENGTH);
			// Separate the summary with a newline, consistent with the name/content join above
			content = content + "\n" + summary;
		}

		return content;
	}

	/**
	 * A summary is included only when the dataset enables pre-summary, the document
	 * is not a Q&amp;A source, and a non-blank summary actually exists.
	 */
	private boolean shouldIncludeSummary(AiDatasetEntity aiDataset, AiDocumentEntity documentEntity) {
		return YesNoEnum.YES.getCode().equals(aiDataset.getPreSummary())
				&& !SourceTypeEnums.QA.getType().equals(documentEntity.getSourceType())
				&& StrUtil.isNotBlank(documentEntity.getSummary());
	}

	/**
	 * Embeds the given content and stores the vector together with its text segment.
	 * @return the id assigned by the vector store
	 */
	private String performEmbedding(String content, AiSliceEntity slice, AiDocumentEntity documentEntity,
			DimensionAwareEmbeddingModel embeddingModel, EmbeddingStore<TextSegment> embeddingStore) {

		// Embed the text content
		Response<Embedding> embeddingResponse = embeddingModel.embed(content);

		// Metadata used later for filtering/lookup in the vector store
		Map<String, Object> metadataMap = Map.of(DocumentTypeEnums.Fields.type, DocumentTypeEnums.ANSWER.getType(),
				AiSliceEntity.Fields.id, slice.getId().toString(), AiDocumentEntity.Fields.datasetId,
				documentEntity.getDatasetId().toString(), AiSliceEntity.Fields.documentId,
				documentEntity.getId().toString(), AiDocumentEntity.Fields.sourceType, documentEntity.getSourceType());

		// Create the text segment carrying the metadata
		TextSegment textSegment = TextSegment.textSegment(content, new Metadata(metadataMap));

		// Store vector and segment; returns the store-assigned id
		return embeddingStore.add(embeddingResponse.content(), textSegment);
	}

	/**
	 * Best-effort removal of a previously stored vector. Deletion failures are logged
	 * and swallowed deliberately so a stale vector never blocks re-embedding.
	 */
	private void removeExistingEmbedding(AiDatasetEntity aiDataset, AiSliceEntity slice) {
		try {
			log.debug("删除已存在的向量数据: {}", slice.getId());
			embeddingStoreService.delete(aiDataset.getCollectionName(), List.of(slice.getQdrantId()));
		}
		catch (Exception e) {
			log.warn("删除旧向量数据失败: {}", e.getMessage(), e);
		}
	}

}