# encoding: utf-8
"""
新增一个简单的RAG交互
"""
import time
from typing import List
import numpy as np
from FlagEmbedding import LLMEmbedder
from ..config.base import (
    EMBEDDING_MODEL_PATH,
    MAX_LEN_TEXT,
    EMBEDDING_DTYPE
)


class Embedding(object):
    """Embedding-based paragraph retrieval helper for a simple RAG flow."""

    top_similar_text = 10  # default number of most-similar paragraphs to return
    max_tokens = 4096  # maximum tokens (not read in this file; kept for external callers)
    streaming = False  # not read in this file; kept for external callers
    embedder_task = 'qa'  # default retrieval task passed to LLMEmbedder

    def __init__(self):
        # Eagerly loads the embedding model from the configured local path.
        self.flag_embedder = LLMEmbedder(EMBEDDING_MODEL_PATH, use_fp16=False)

    def similar_paragraph_with_embedding(self, queries: List, texts: List, task=None, top_n=None):
        """
        Rank candidate paragraphs by embedding similarity for each query.

        :param queries: a single query string or a list of query strings
        :param texts: candidate paragraph strings
        :param task: retrieval task for the embedder; defaults to ``embedder_task``
        :param top_n: number of most-similar paragraphs per query; defaults to
                      ``top_similar_text``
        :return: dict mapping query index -> list of ``[text_index, score]``
                 pairs sorted by score descending, truncated to ``top_n``
        """
        if task is None:
            task = self.embedder_task

        if top_n is None:
            top_n = self.top_similar_text

        if isinstance(queries, str):
            queries = [queries]

        if EMBEDDING_DTYPE == 'local':
            # Local backend: encode items one at a time, then stack into arrays.
            query_embeddings = np.array(
                [self.flag_embedder.encode_queries(query, task=task) for query in queries]
            )
            key_embeddings = np.array(
                [self.flag_embedder.encode_keys(text, task=task) for text in texts]
            )
        else:
            # Non-local backend accepts batched input directly.
            query_embeddings = self.flag_embedder.encode_queries(queries, task=task)
            key_embeddings = self.flag_embedder.encode_keys(texts, task=task)

        # Dot-product similarity matrix of shape (n_queries, n_texts).
        similarities = query_embeddings @ key_embeddings.T

        top_similar_indexes = {}
        for query_index, similarity in enumerate(similarities):
            text_similarity = [[text_index, score] for text_index, score in enumerate(similarity)]
            text_similarity = sorted(text_similarity, key=lambda x: x[1], reverse=True)[: top_n]
            top_similar_indexes[query_index] = text_similarity

        return top_similar_indexes

    def query_similar_text(self, queries, sorted_texts: list[dict], task=None, top_n=None):
        """
        For each query, retrieve the most similar paragraphs and combine them
        into a context string bounded by ``MAX_LEN_TEXT``.

        :param queries: a single query string or a list of query strings
        :param sorted_texts: list of dicts, each with ``index`` (original
                             position) and ``text`` (paragraph content) keys
        :param task: retrieval task forwarded to the embedder
        :param top_n: number of candidate paragraphs per query
        :return: dict mapping query index -> {'query_index', 'query',
                 'response': {'content', 'similar_index'}}
        :raises Exception: propagated from ``combine_appropriate_text`` when
                 no paragraph fits within ``MAX_LEN_TEXT``
        """
        # Bug fix: normalize a bare string here as well (the downstream call
        # already does this). Without it, queries[query_i] below would index
        # into the string and return a single character instead of the query.
        if isinstance(queries, str):
            queries = [queries]

        text_indexes = []
        texts = []
        # Drop empty / whitespace-only paragraphs before embedding.
        actual_texts = [_ for _ in sorted_texts if len(_['text'].strip()) > 0]
        for _ in actual_texts:
            text_indexes.append(_['index'])
            texts.append(_['text'])

        # Retrieve similar paragraphs, timing the embedding step.
        time_1 = time.time()
        top_similar_indexes: dict = self.similar_paragraph_with_embedding(queries, texts, task, top_n)
        time_2 = time.time()
        embedding_time = time_2 - time_1
        print('向量化时间', embedding_time)
        query_responses = {}
        for query_i, similar_i in top_similar_indexes.items():
            appropriate_text, appropriate_index = combine_appropriate_text(texts, similar_i, MAX_LEN_TEXT)
            query_responses[query_i] = {
                'query_index': query_i,
                'query': queries[query_i],
                'response': {
                    'content': appropriate_text,
                    # Map positions in the filtered ``texts`` list back to the
                    # callers' original paragraph indices.
                    'similar_index': [{'index': text_indexes[i[0]], 'score': float(i[1])} for i in appropriate_index]
                },
            }
        return query_responses


def combine_appropriate_text(texts, similar_index, max_length):
    """
    Greedily take top-ranked paragraphs while their combined character
    length stays within the model's input limit.

    :param texts: paragraph strings, indexed by position
    :param similar_index: ``[index, score]`` pairs, best score first
    :param max_length: maximum total character length of the combined text
    :return: tuple of (joined paragraph text, the ``[index, score]`` pairs used)
    :raises Exception: when not even the first paragraph fits
    """
    used_pairs = []
    consumed = 0
    for pair in similar_index:
        idx = pair[0]
        # Stop at the first paragraph that would exceed the budget; the
        # remaining (lower-scored) candidates are discarded as well.
        if consumed + len(texts[idx]) > max_length:
            break
        used_pairs.append([idx, pair[1]])
        consumed += len(texts[idx])

    if not used_pairs:
        raise Exception('相似段落文本超长')

    combined = '\n'.join(texts[idx] for idx, _ in used_pairs)
    return combined, used_pairs