package com.koicarp.agent.provider.tools;

import java.util.*;
import java.util.function.Function;

import com.alibaba.druid.pool.DruidDataSource;
import com.koicarp.agent.common.KoiException;
import com.koicarp.agent.provider.Agent;
import com.koicarp.agent.provider.ChatMemeoryCache;
import com.koicarp.agent.provider.CustomPgVectorEmbeddingStore;
import com.koicarp.agent.provider.EmbedStoreProperties;
import com.koicarp.agent.provider.build.ModelConfig;
import com.koicarp.agent.provider.build.OllamEmbedModelBuilder;
import com.koicarp.agent.provider.build.OllamaBigModelBuilder;
import com.koicarp.agent.provider.build.OpenAIBigModelBuilder;
import com.koicarp.agent.provider.constant.EmbedConstant;
import com.koicarp.agent.provider.constant.ProviderErrMsgConstant;
import com.koicarp.agent.provider.entity.*;
import com.koicarp.agent.provider.enums.ModelProviderEnum;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;

import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.loader.UrlDocumentLoader;
import dev.langchain4j.data.document.parser.apache.tika.ApacheTikaDocumentParser;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.rag.query.Query;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.filter.Filter;
import dev.langchain4j.store.embedding.pgvector.PgVectorEmbeddingStore;
import lombok.extern.slf4j.Slf4j;

/**
 * Utility class for building AI provider components: pgvector embedding stores,
 * embedding/large language models, chat {@link AiServices}, RAG content retrievers,
 * and document embedding helpers.
 *
 * <p>Must be initialized once via {@link #init(EmbedStoreProperties)} before any
 * method that touches the embedding store is called.
 *
 * <p>NOTE(review): static mutable state ({@code embedStoreProp}, {@code dataSource})
 * assumes single-threaded initialization at startup — confirm callers honor this.
 *
 * @author liutao
 * @date 2025-03-06
 */
@Slf4j
public class AIProviderUtil {

	private AIProviderUtil() {
		// Utility class: prevent instantiation.
	}

	/** Connection properties for the pgvector store; populated by {@link #init}. */
	protected static EmbedStoreProperties embedStoreProp;
	/** Shared Druid connection pool backing all embedding-store instances. */
	private static DruidDataSource dataSource = new DruidDataSource();

	/**
	 * Initializes the shared datasource from the given embedding-store properties.
	 * Must be called once before {@link #buildEmbedStore} or {@link #getTextsByVectorIds}.
	 *
	 * @param embeddingStoreProperties connection settings (JDBC URL, username, password)
	 */
	public static void init(EmbedStoreProperties embeddingStoreProperties) {
		AIProviderUtil.embedStoreProp = embeddingStoreProperties;
		dataSource.setUrl(embedStoreProp.getDataSourceUrl());
		dataSource.setUsername(embedStoreProp.getUsername());
		dataSource.setPassword(embedStoreProp.getPassword());
	}

	/**
	 * Builds a pgvector-backed embedding store over the shared datasource.
	 * Creates the table if missing (never drops it) and enables an IVF index.
	 *
	 * @param tableName name of the pgvector table
	 * @param dimension embedding vector dimension
	 * @return the configured {@code EmbeddingStore<TextSegment>}
	 */
	public static EmbeddingStore<TextSegment> buildEmbedStore(String tableName, int dimension) {
		// Driver class is (re)set on every call; idempotent on the shared pool.
		dataSource.setDriverClassName(EmbedConstant.PGSQL_DRIVER_PATH);
		return PgVectorEmbeddingStore.datasourceBuilder()
								.datasource(dataSource)
								.table(tableName)
								.dimension(dimension)
								.indexListSize(ModelConfig.INDEX_LIST_SIZE)
								.useIndex(true)
								.createTable(true)
								.dropTableFirst(false)
								.build();
	}

	/**
	 * Builds an embedding model from basic connection settings.
	 *
	 * @param baseUrl   base URL of the model endpoint
	 * @param modelName model name/identifier
	 * @param provider  provider key (see {@link ModelProviderEnum})
	 * @return the configured {@link EmbeddingModel}
	 */
	public static EmbeddingModel buildEmbedModel(String baseUrl, String modelName, String provider) {
		EmbedModel model = new EmbedModel();
		model.setBaseUrl(baseUrl);
		model.setModel(modelName);
		model.setProvider(provider);
		return OllamEmbedModelBuilder.build().embedding(model);
	}

	/**
	 * Builds a streaming large language model. Currently supports the Ollama and
	 * OpenAI-compatible (DeepSeek) providers.
	 *
	 * @param model large-model configuration, including the provider key
	 * @return the configured {@link StreamingChatLanguageModel}
	 * @throws KoiException if the provider is not supported
	 */
	public static StreamingChatLanguageModel buildLargeModel(LargeModel model) {
		String provider = model.getProvider();

		if (ModelProviderEnum.OLLAMA.name().equalsIgnoreCase(provider)) {
			return OllamaBigModelBuilder.build().streamingChat(model);
		}

		if (ModelProviderEnum.DEEPSEEK.name().equalsIgnoreCase(provider)) {
			return OpenAIBigModelBuilder.build().streamingChat(model);
		}

		log.error(ProviderErrMsgConstant.LARGE_MODEL_NOT_FIND, provider);
		// NOTE(review): the logged message (LARGE_MODEL_NOT_FIND) and the thrown one
		// (OPENAI_STREAM_CONFIG_ERROR) differ — confirm which constant is intended.
		throw new KoiException(ProviderErrMsgConstant.OPENAI_STREAM_CONFIG_ERROR);
	}

	/**
	 * Builds a streaming chat {@link AiServices} for {@link Agent} with a
	 * message-window memory and an optional system prompt.
	 *
	 * @param chatModel      streaming chat model
	 * @param maxMessages    memory window size
	 * @param conversationId conversation (memory) id
	 * @param prompt         optional system prompt; applied only when non-empty
	 * @return the configured {@code AiServices<Agent>} builder
	 */
	public static AiServices<Agent> buildStreamChat(StreamingChatLanguageModel chatModel, Integer maxMessages,
													Integer conversationId, String prompt) {
		AiServices<Agent> aiServices = AiServices.builder(Agent.class)
				.streamingChatLanguageModel(chatModel)
				.chatMemoryProvider(memoryId ->
					MessageWindowChatMemory.builder()
										.id(conversationId)
										.maxMessages(maxMessages)
										.chatMemoryStore(new ChatMemeoryCache())
										.build());
		if (StringUtils.hasLength(prompt)) {
			aiServices.systemMessageProvider(memoryId -> prompt);
		}
		return aiServices;
	}

	/**
	 * Builds a streaming chat {@link AiServices} for an arbitrary service interface.
	 * Memory is attached only when a conversation id is provided.
	 *
	 * @param <T>            the AI service interface type
	 * @param class1         the AI service interface class
	 * @param chatModel      streaming chat model
	 * @param maxMessages    memory window size (used only when memory is enabled)
	 * @param conversationId conversation id; {@code null} disables chat memory
	 * @param prompt         optional system prompt; applied only when non-empty
	 * @return the configured {@code AiServices<T>} builder
	 */
	public static <T> AiServices<T> buildStreamChat(Class<T> class1, StreamingChatLanguageModel chatModel, Integer maxMessages,
			Integer conversationId, String prompt) {
		AiServices<T> aiServices = AiServices.builder(class1)
				.streamingChatLanguageModel(chatModel);

		if (Objects.nonNull(conversationId)) {
			aiServices.chatMemoryProvider(memoryId ->
											MessageWindowChatMemory.builder()
											.id(conversationId)
											.maxMessages(maxMessages)
											.chatMemoryStore(new ChatMemeoryCache())
											.build());
		}

		if (StringUtils.hasLength(prompt)) {
			aiServices.systemMessageProvider(memoryId -> prompt);
		}
		return aiServices;
	}

	/**
	 * Builds a non-streaming chat {@link AiServices} for {@link Agent} with a
	 * message-window memory and an optional system prompt.
	 *
	 * @param model          chat model
	 * @param maxMessages    memory window size
	 * @param conversationId conversation (memory) id
	 * @param prompt         optional system prompt; applied only when non-empty
	 * @return the configured {@code AiServices<Agent>} builder
	 */
	public static AiServices<Agent> buildChat(ChatLanguageModel model, Integer maxMessages, String conversationId, String prompt) {
		AiServices<Agent> aiServices = AiServices.builder(Agent.class)
				.chatLanguageModel(model)
				.chatMemoryProvider(memoryId ->
					MessageWindowChatMemory.builder()
										.id(conversationId)
										.maxMessages(maxMessages)
										.chatMemoryStore(new ChatMemeoryCache())
										.build());
		// FIX: previously guarded on conversationId (copy-paste bug) — the system
		// message must be registered when the PROMPT is present, matching both
		// buildStreamChat overloads.
		if (StringUtils.hasLength(prompt)) {
			aiServices.systemMessageProvider(memoryId -> prompt);
		}
		return aiServices;
	}

	/**
	 * Builds a RAG content retriever over the given store and model.
	 *
	 * @param embeddingStore vector store to search
	 * @param embeddingModel model used to embed the query
	 * @param filter         per-query dynamic metadata filter
	 * @return the configured {@link ContentRetriever}
	 */
	public static ContentRetriever buildContentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel,
			Function<Query, Filter> filter) {
		return EmbeddingStoreContentRetriever.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                .dynamicFilter(filter)
                .minScore(ModelConfig.RAG_MIN_SCORE)
                .build();
	}

	/**
	 * Embeds raw text and stores the resulting vectors.
	 *
	 * @param embeddingStore target vector store
	 * @param embeddingModel embedding model
	 * @param splitter       document splitter
	 * @param message        raw text to embed
	 * @param knowledgeId    knowledge-base id (stored as segment metadata)
	 * @param docName        document name (stored as segment metadata)
	 * @return an {@link EmbedResult} with the full text and per-slice vector ids
	 */
	public static EmbedResult embeddingText(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel, DocumentSplitter splitter,
											String message, Integer knowledgeId, String docName) {
		Document document = Document.from(message);

		EmbedResult result = new EmbedResult();
		result.setContent(document.text());
		List<EmbedSlice> slices = embeddingWithSplitter(embeddingStore, embeddingModel, splitter, document, knowledgeId, docName);
		result.setSlices(slices);
		return result;
	}

	/**
	 * Loads a document from a URL (parsed with Apache Tika), embeds it, and
	 * stores the resulting vectors.
	 *
	 * @param embeddingStore target vector store
	 * @param embeddingModel embedding model
	 * @param splitter       document splitter
	 * @param url            document URL
	 * @param knowledgeId    knowledge-base id (stored as segment metadata)
	 * @param docName        document name (stored as segment metadata)
	 * @return an {@link EmbedResult} with the full text and per-slice vector ids
	 */
	public static EmbedResult embeddingDoc(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel, DocumentSplitter splitter,
			String url, Integer knowledgeId, String docName) {
		Document document = UrlDocumentLoader.load(url, new ApacheTikaDocumentParser());
		EmbedResult result = new EmbedResult();
		result.setContent(document.text());
		List<EmbedSlice> slices = embeddingWithSplitter(embeddingStore, embeddingModel, splitter, document, knowledgeId, docName);
		result.setSlices(slices);
		return result;
	}

	/**
	 * Splits a document, embeds each segment, and stores the vectors. Best-effort:
	 * on any failure the error is logged (with stack trace) and the slices
	 * collected so far — possibly none — are returned.
	 *
	 * @param embeddingStore target vector store
	 * @param embeddingModel embedding model
	 * @param splitter       document splitter
	 * @param document       document to embed
	 * @param knowledgeId    knowledge-base id (stored as segment metadata)
	 * @param docName        document name (stored as segment metadata)
	 * @return one {@link EmbedSlice} (vector id + segment length) per stored segment
	 */
	private static List<EmbedSlice> embeddingWithSplitter(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel, DocumentSplitter splitter,
			Document document, Integer knowledgeId, String docName) {
		document.metadata()
				.put(EmbedConstant.KNOWLEDGE, String.valueOf(knowledgeId))
				.put(EmbedConstant.DOC_NAME, docName);
		List<EmbedSlice> results = new ArrayList<>();
		try {
			List<TextSegment> textSegments = splitter.split(document);
			List<Embedding> embeddings = embeddingModel.embedAll(textSegments).content();
			// addAll returns vector ids positionally aligned with textSegments.
			List<String> vectorIds = embeddingStore.addAll(embeddings, textSegments);
            for (int i = 0; i < vectorIds.size(); i++) {
            	results.add(new EmbedSlice(vectorIds.get(i), textSegments.get(i).text().length()));
            }
			log.info("文档向量化结束,knowledgeId:{},docName:{}", knowledgeId, docName);
		} catch (Exception e) {
			// FIX: pass the throwable as the last argument so SLF4J records the
			// stack trace; previously the exception was silently dropped.
			log.error(ProviderErrMsgConstant.DOCS_EMBEDDING_ERROR, knowledgeId, docName, e);
		}

		return results;
	}

	/**
	 * Fetches the stored text for each given vector id.
	 *
	 * @param tableName pgvector table name
	 * @param dimension embedding vector dimension
	 * @param vectorIds vector ids to look up; empty/null yields an empty map
	 * @return map of vector id to its stored text (ids not found are absent)
	 */
	public static Map<String, String> getTextsByVectorIds(String tableName, int dimension, List<String> vectorIds) {
		Map<String, String> textMap = new HashMap<>();
		if (CollectionUtils.isEmpty(vectorIds)) {
			return textMap;
		}

		CustomPgVectorEmbeddingStore embedStore = new CustomPgVectorEmbeddingStore(
				embedStoreProp.getHost(),
				embedStoreProp.getPort(),
				embedStoreProp.getUsername(),
				embedStoreProp.getPassword(),
				embedStoreProp.getDatabase(),
				tableName,
				dimension,
				true,
				ModelConfig.INDEX_LIST_SIZE,
				true,
				false,
				null);

		List<EmbedDbModel> embedDbModels = embedStore.searchByVectorIds(vectorIds);
		for (EmbedDbModel embedDbModel : embedDbModels) {
			textMap.put(embedDbModel.getVectorId(), embedDbModel.getText());
		}
		return textMap;
	}

}
