# encoding: utf-8
"""
新增一个简单的RAG交互
"""
import time
import threading
from typing import List

import numpy as np
from FlagEmbedding import LLMEmbedder

from .base import create_langchain_llm_client
from .model import NetworkEmbedder
from .prompt import SIMPLE_CONTRACT_PROMPT
from ...config.assist import logging
from ...config.base import EMBEDDING_MODEL_PATH, MAX_LEN_TEXT, EMBEDDING_DTYPE


class RAGSimpleAgent(object):
    """Simple RAG interaction: embed queries and paragraphs, retrieve the most
    similar paragraphs per query, and optionally ask the LLM to answer against
    the retrieved context.
    """

    top_similar_text = 10  # number of most-similar paragraphs to return
    max_tokens = 4096  # max tokens for the LLM completion
    streaming = False  # whether the LLM client streams its output
    embedder_task = 'qa'  # retrieval task: 'qa', 'icl', 'chat', 'lrlm', 'tool', 'convsearch'

    def __init__(self):
        # LLM client (LangChain wrapper around the configured model endpoint).
        self.llm_client = create_langchain_llm_client(
            model_name=None, api_path=None,
            max_tokens=RAGSimpleAgent.max_tokens, streaming=RAGSimpleAgent.streaming)

        # Embedder: in-process model when EMBEDDING_DTYPE == 'local',
        # otherwise a network-backed embedding service.
        self.flag_embedder = (LLMEmbedder(EMBEDDING_MODEL_PATH, use_fp16=False)
                              if EMBEDDING_DTYPE == 'local' else NetworkEmbedder())

    @staticmethod
    def _split_indexed_texts(sorted_texts: List[dict]):
        """Drop blank paragraphs and split indexed records into parallel lists.

        :param sorted_texts: [{'index': original_index, 'text': paragraph}, ...]
        :return: (text_indexes, texts) with empty/whitespace-only texts removed
        """
        actual_texts = [item for item in sorted_texts if len(item['text'].strip()) > 0]
        text_indexes = [item['index'] for item in actual_texts]
        texts = [item['text'] for item in actual_texts]
        return text_indexes, texts

    def similar_paragraph_with_embedding(self, queries: List, texts: List, task=None, top_n=None):
        """Rank paragraphs by embedding similarity against each query.

        :param queries: a query string or a list of query strings
        :param texts: candidate paragraphs
        :param task: retrieval task type; defaults to self.embedder_task
        :param top_n: number of most-similar paragraphs; defaults to self.top_similar_text
        :return: {query_index: [[text_index, score], ...]} sorted by descending score
        """
        if task is None:
            task = self.embedder_task

        if top_n is None:
            top_n = self.top_similar_text

        if isinstance(queries, str):
            queries = [queries]

        if EMBEDDING_DTYPE == 'local':
            # Encode one item at a time to keep peak memory low on the local model.
            query_embeddings = np.array(
                [self.flag_embedder.encode_queries(query, task=task) for query in queries])
            key_embeddings = np.array(
                [self.flag_embedder.encode_keys(text, task=task) for text in texts])
        else:
            # The network embedder accepts whole batches.
            query_embeddings = self.flag_embedder.encode_queries(queries, task=task)
            key_embeddings = self.flag_embedder.encode_keys(texts, task=task)

        # Dot-product similarity matrix (queries x texts); presumably the
        # embedder returns normalized vectors -- TODO confirm.
        similarities = query_embeddings @ key_embeddings.T

        # For every query keep the top_n highest-scoring paragraph indexes.
        top_similar_indexes = {}
        for query_index, similarity in enumerate(similarities):
            text_similarity = [[text_index, score] for text_index, score in enumerate(similarity)]
            text_similarity = sorted(text_similarity, key=lambda x: x[1], reverse=True)[: top_n]
            top_similar_indexes[query_index] = text_similarity

        return top_similar_indexes

    def query_similar_text(self, queries, sorted_texts: List[dict], task=None, top_n=None):
        """Retrieve the most similar paragraphs per query (no LLM call).

        :param queries: a query string or a list of query strings
        :param sorted_texts: indexed paragraphs [{'index': ..., 'text': ...}, ...]
        :param task: retrieval task type; defaults to self.embedder_task
        :param top_n: number of most-similar paragraphs; defaults to self.top_similar_text
        :return: {query_index: {'query_index', 'query', 'response': {'content', 'similar_index'}}}
        """
        text_indexes, texts = self._split_indexed_texts(sorted_texts)

        # Retrieve the most similar paragraphs and record embedding latency.
        time_1 = time.time()
        top_similar_indexes: dict = self.similar_paragraph_with_embedding(queries, texts, task, top_n)
        embedding_time = time.time() - time_1
        logging.info('向量化时间 %s', embedding_time)

        query_responses = {}
        for query_i, similar_i in top_similar_indexes.items():
            appropriate_text, appropriate_index = combine_appropriate_text(texts, similar_i, MAX_LEN_TEXT)
            query_responses[query_i] = {
                'query_index': query_i,
                'query': queries[query_i],
                'response': {
                    'content': appropriate_text,
                    'similar_index': [{'index': text_indexes[i[0]], 'score': float(i[1])} for i in appropriate_index]
                },
            }
        return query_responses

    def answer_with_rag(self, queries, sorted_texts: List[dict], task=None, top_n=None):
        """Answer questions by combining vector similarity retrieval with the LLM.

        :param queries: a query string or a list of query strings
        :param sorted_texts: indexed paragraphs [{'index': ..., 'text': ...}, ...]
        :param task: retrieval task type; defaults to self.embedder_task
        :param top_n: number of most-similar paragraphs; defaults to self.top_similar_text
        :return: {query_index: {'query_index', 'query', 'response': {'content', 'select_index'}}}
        """
        text_indexes, texts = self._split_indexed_texts(sorted_texts)

        # Retrieve the most similar paragraphs for each query.
        top_similar_indexes: dict = self.similar_paragraph_with_embedding(queries, texts, task, top_n)
        query_responses = {}
        for query_index, similar_index in top_similar_indexes.items():
            appropriate_text, appropriate_index = combine_appropriate_text(texts, similar_index, MAX_LEN_TEXT)
            prompt_args = {
                'contract_text': appropriate_text,
                'review_point': queries[query_index]
            }
            # The default prompt template is used for now.
            answer = self.answer_with_prompt(SIMPLE_CONTRACT_PROMPT, prompt_args)
            query_responses[query_index] = {
                'query_index': query_index,
                'query': queries[query_index],
                'response': {
                    'content': answer.content,
                    'select_index': [{'index': text_indexes[i[0]], 'score': float(i[1])} for i in appropriate_index]
                },
            }
        return query_responses

    def answer_with_rag_threading(self, queries, sorted_texts: List[dict], task=None, top_n=None):
        """Same as answer_with_rag but issues one LLM call per query in a
        separate thread, so slow LLM round-trips overlap.

        :param queries: a query string or a list of query strings
        :param sorted_texts: indexed paragraphs [{'index': ..., 'text': ...}, ...]
        :param task: retrieval task type; defaults to self.embedder_task
        :param top_n: number of most-similar paragraphs; defaults to self.top_similar_text
        :return: {query_index: {'query_index', 'query', 'response': {'content', 'select_index'}}}
        """
        text_indexes, texts = self._split_indexed_texts(sorted_texts)

        # Retrieve the most similar paragraphs and record embedding latency.
        time_1 = time.time()
        top_similar_indexes: dict = self.similar_paragraph_with_embedding(queries, texts, task, top_n)
        embedding_time = time.time() - time_1

        # Each thread writes a distinct key, so concurrent dict assignment is
        # safe under the GIL.
        query_responses = {}

        def _answer_from_llm(query_index, similar_index):
            # Build the prompt from the retrieved context and ask the LLM.
            appropriate_text, appropriate_index = combine_appropriate_text(texts, similar_index, MAX_LEN_TEXT)
            prompt_args = {
                'contract_text': appropriate_text,
                'review_point': queries[query_index]
            }
            # The default prompt template is used for now.
            answer = self.answer_with_prompt(SIMPLE_CONTRACT_PROMPT, prompt_args)
            query_responses[query_index] = {
                'query_index': query_index,
                'query': queries[query_index],
                'response': {
                    'content': answer.content,
                    'select_index': [{'index': text_indexes[i[0]], 'score': float(i[1])} for i in appropriate_index]
                },
            }

        # NOTE(review): one thread per query with no upper bound -- consider a
        # ThreadPoolExecutor if the query count can be large.
        time_1 = time.time()
        threads = []
        for query_i, similar_i in top_similar_indexes.items():
            thread = threading.Thread(target=_answer_from_llm, args=(query_i, similar_i))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()
        llm_time = time.time() - time_1
        msg = '向量化时间： %s, 大模型问答时间： %s, 总时间： %s' % (embedding_time, llm_time, embedding_time + llm_time)
        logging.info(msg)
        return query_responses

    def answer_with_prompt(self, prompt_template, prompt_args):
        """Fill the prompt template and get the LLM's answer.

        :param prompt_template: prompt template with str.format placeholders
        :param prompt_args: keyword arguments for the template
        :return: the LLM client's response object
        """
        llm_query = prompt_template.format(**prompt_args)

        # Ask the LLM.
        answer = self.llm_client.invoke(llm_query)
        return answer


def combine_appropriate_text(texts, similar_index, max_length):
    """Greedily take scored paragraphs, in the given order, until adding the
    next one would exceed the model's maximum input length.

    :param texts: paragraph list
    :param similar_index: [[paragraph_index, score], ...] ordered by relevance
    :param max_length: upper bound on the combined text length
    :return: (joined_text, kept [[paragraph_index, score], ...])
    """
    kept = []
    used_length = 0
    for text_index, score in similar_index:
        candidate_length = used_length + len(texts[text_index])
        if candidate_length > max_length:
            # Stop at the first paragraph that does not fit; lower-ranked
            # paragraphs after it are not considered.
            break
        kept.append([text_index, score])
        used_length = candidate_length

    if not kept:
        raise Exception('相似段落文本超长')

    combined = '\n'.join(texts[text_index] for text_index, _ in kept)

    return combined, kept

