#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import datetime
import json
import logging
import re
from collections import defaultdict

import json_repair

from api import settings
from api.db import LLMType
from rag.settings import TAG_FLD
from rag.utils import encoder, num_tokens_from_string


def chunks_format(reference):
    """Normalize the chunks of a retrieval *reference* dict to a fixed schema.

    Each chunk may carry either internal field names (``chunk_id``,
    ``doc_id``, ``docnm_kwd``, ...) or external ones (``id``,
    ``document_id``, ``document_name``, ...); the first name present wins.
    Fields absent under both names come out as ``None``.
    """

    def _first_of(record, primary, fallback):
        # Prefer `primary` whenever the key exists (even if its value is
        # falsy), otherwise fall back to `fallback`.
        if primary in record:
            return record[primary]
        return record.get(fallback)

    formatted = []
    for chunk in reference.get("chunks", []):
        formatted.append(
            {
                "id": _first_of(chunk, "chunk_id", "id"),
                "content": _first_of(chunk, "content", "content_with_weight"),
                "document_id": _first_of(chunk, "doc_id", "document_id"),
                "document_name": _first_of(chunk, "docnm_kwd", "document_name"),
                "dataset_id": _first_of(chunk, "kb_id", "dataset_id"),
                "image_id": _first_of(chunk, "image_id", "img_id"),
                "positions": _first_of(chunk, "positions", "position_int"),
                "url": chunk.get("url"),
                "similarity": chunk.get("similarity"),
                "vector_similarity": chunk.get("vector_similarity"),
                "term_similarity": chunk.get("term_similarity"),
                "doc_type": chunk.get("doc_type_kwd"),
            }
        )
    return formatted


def llm_id2llm_type(llm_id):
    """Resolve the model type (e.g. "chat", "image2text") for an LLM ID.

    Returns None when the model is not found in the configured factories.
    """
    # TenantLLMService knows how to split "<model name>@<factory>".
    from api.db.services.llm_service import TenantLLMService

    # Keep only the model-name part; the factory suffix (if any) is dropped.
    llm_id, *_ = TenantLLMService.split_model_name_and_factory(llm_id)

    # All supported factories and their models, from configuration.
    llm_factories = settings.FACTORY_LLM_INFOS
    for llm_factory in llm_factories:
        for llm in llm_factory["llm"]:
            if llm_id == llm["llm_name"]:
                # "model_type" may be a comma-separated list (possibly with a
                # trailing comma); the last entry is the effective type.
                # BUGFIX: the previous `.strip(",")[-1]` indexed the string
                # itself and returned only its last *character* (e.g. "chat"
                # -> "t"), so comparisons like == "image2text" never matched.
                return llm["model_type"].strip(",").split(",")[-1]
    return None


def message_fit_in(msg, max_length=4000):
    """Trim a chat message list so its total token count fits *max_length*.

    Strategy, applied in order until the messages fit:
      1. If everything already fits, return as-is.
      2. Keep only the system message(s) plus the latest message.
      3. Truncate whichever of the (first, last) messages dominates the
         token budget, reserving room for the other one.

    Returns a (token_count, messages) tuple; after truncation the reported
    count is *max_length*.
    """

    def count():
        # Total tokens across all current messages.
        nonlocal msg
        return sum(num_tokens_from_string(m["content"]) for m in msg)

    c = count()
    if c < max_length:
        return c, msg

    # Drop everything except system messages and the most recent message.
    msg_ = [m for m in msg if m["role"] == "system"]
    if len(msg) > 1:
        msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0]["content"])
    ll2 = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + ll2) > 0.8:
        # The first (system) message dominates: truncate it, reserving room
        # for the last message.
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[: max_length - ll2])
        msg[0]["content"] = m
        return max_length, msg

    # Otherwise truncate the last message, reserving room for the first one.
    # BUGFIX: the budget is max_length - ll (tokens left after the first
    # message), not max_length - ll2 (the last message's own size), which
    # could even go negative and slice from the wrong end.
    m = msg_[-1]["content"]
    m = encoder.decode(encoder.encode(m)[: max_length - ll])
    msg[-1]["content"] = m
    return max_length, msg


def kb_prompt(kbinfos, max_tokens):
    """Format retrieved chunks into per-document knowledge sections.

    Packs as many chunk texts as fit within ~97% of *max_tokens*, fetches the
    owning documents' user-defined metadata, groups chunks by document name,
    and returns a list of formatted text sections (one per document) ready to
    be embedded in a prompt.
    """
    from api.db.services.document_service import DocumentService

    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    # Remember the pre-truncation total for accurate logging below.
    total_chunks = len(knowledges)

    used_token_count = 0
    chunks_num = 0
    for i, c in enumerate(knowledges):
        used_token_count += num_tokens_from_string(c)
        chunks_num += 1
        # Leave ~3% headroom for the surrounding formatting text.
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            # BUGFIX: report the original total; `len(knowledges)` here would
            # reflect the just-truncated list and print "i+1/i".
            logging.warning(f"Not all the retrieval into prompt: {i + 1}/{total_chunks}")
            break

    # Map doc_id -> meta_fields for the documents owning the retained chunks.
    docs = DocumentService.get_by_ids([ck["doc_id"] for ck in kbinfos["chunks"][:chunks_num]])
    docs = {d.id: d.meta_fields for d in docs}

    # Group chunk texts (and their document metadata) by document name.
    doc2chunks = defaultdict(lambda: {"chunks": [], "meta": []})
    for i, ck in enumerate(kbinfos["chunks"][:chunks_num]):
        # Each fragment starts with its citation ID and, when present, URL.
        cnt = f"---\nID: {i}\n" + (f"URL: {ck['url']}\n" if "url" in ck else "")
        cnt += ck["content_with_weight"]
        doc2chunks[ck["docnm_kwd"]]["chunks"].append(cnt)
        doc2chunks[ck["docnm_kwd"]]["meta"] = docs.get(ck["doc_id"], {})

    # Build one text section per document: title, metadata, then fragments.
    knowledges = []
    for nm, cks_meta in doc2chunks.items():
        txt = f"\nDocument: {nm} \n"
        for k, v in cks_meta["meta"].items():
            txt += f"{k}: {v}\n"
        txt += "Relevant fragments as following:\n"
        for chunk in cks_meta["chunks"]:
            txt += f"{chunk}\n"
        knowledges.append(txt)

    return knowledges


def citation_prompt():
    """Return the system-prompt fragment instructing the LLM how to cite.

    The returned text demands the '##i$$' citation format (where i is the
    chunk ID) and includes a worked example conversation.  The literal is
    behavior-bearing prompt text and must not be reworded casually.
    """
    return """

# Citation requirements:
- Inserts CITATIONS in format '##i$$ ##j$$' where i,j are the ID of the content you are citing and encapsulated with '##' and '$$'.
- Inserts the CITATION symbols at the end of a sentence, AND NO MORE than 4 citations.
- DO NOT insert CITATION in the answer if the content is not from retrieved chunks.
- DO NOT use standalone Document IDs (e.g., '#ID#').
- Under NO circumstances any other citation styles or formats (e.g., '~~i==', '[i]', '(i)', etc.) be used.
- Citations ALWAYS the '##i$$' format.
- Any failure to adhere to the above rules, including but not limited to incorrect formatting, use of prohibited styles, or unsupported citations, will be considered a error, should skip adding Citation for this sentence.

--- Example START ---
<SYSTEM>: Here is the knowledge base:

Document: Elon Musk Breaks Silence on Crypto, Warns Against Dogecoin ...
URL: https://blockworks.co/news/elon-musk-crypto-dogecoin
ID: 0
The Tesla co-founder advised against going all-in on dogecoin, but Elon Musk said it’s still his favorite crypto...

Document: Elon Musk's Dogecoin tweet sparks social media frenzy
ID: 1
Musk said he is 'willing to serve' D.O.G.E. – shorthand for Dogecoin.

Document: Causal effect of Elon Musk tweets on Dogecoin price
ID: 2
If you think of Dogecoin — the cryptocurrency based on a meme — you can’t help but also think of Elon Musk...

Document: Elon Musk's Tweet Ignites Dogecoin's Future In Public Services
ID: 3
The market is heating up after Elon Musk's announcement about Dogecoin. Is this a new era for crypto?...

      The above is the knowledge base.

<USER>: What's the Elon's view on dogecoin?

<ASSISTANT>: Musk has consistently expressed his fondness for Dogecoin, often citing its humor and the inclusion of dogs in its branding. He has referred to it as his favorite cryptocurrency ##0$$ ##1$$.
Recently, Musk has hinted at potential future roles for Dogecoin. His tweets have sparked speculation about Dogecoin's potential integration into public services ##3$$.
Overall, while Musk enjoys Dogecoin and often promotes it, he also warns against over-investing in it, reflecting both his personal amusement and caution regarding its speculative nature.

--- Example END ---

"""


def keyword_extraction(chat_mdl, content, topn=3):
    """Ask *chat_mdl* to extract the top-*topn* keywords/phrases of *content*.

    Returns the comma-delimited keyword string, or "" when the model reports
    an error.
    """
    prompt = f"""
Role: You're a text analyzer.
Task: extract the most important keywords/phrases of a given piece of text content.
Requirements:
  - Summarize the text content, and give top {topn} important keywords/phrases.
  - The keywords MUST be in language of the given piece of text content.
  - The keywords are delimited by ENGLISH COMMA.
  - Keywords ONLY in output.

### Text Content
{content}

"""
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "},
    ]
    # Shrink the conversation so it fits the model's context window.
    _, messages = message_fit_in(messages, chat_mdl.max_length)
    answer = chat_mdl.chat(prompt, messages[1:], {"temperature": 0.2})
    # Some model wrappers return (text, token_count); keep just the text.
    answer = answer[0] if isinstance(answer, tuple) else answer
    # Drop any chain-of-thought block that precedes </think>.
    answer = re.sub(r"^.*</think>", "", answer, flags=re.DOTALL)
    return "" if "**ERROR**" in answer else answer


def question_proposal(chat_mdl, content, topn=3):
    """Ask *chat_mdl* to propose *topn* questions about *content*.

    Returns the proposed questions (one per line), or "" when the model
    reports an error.
    """
    prompt = f"""
Role: You're a text analyzer.
Task:  propose {topn} questions about a given piece of text content.
Requirements:
  - Understand and summarize the text content, and propose top {topn} important questions.
  - The questions SHOULD NOT have overlapping meanings.
  - The questions SHOULD cover the main content of the text as much as possible.
  - The questions MUST be in language of the given piece of text content.
  - One question per line.
  - Question ONLY in output.

### Text Content
{content}

"""
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "},
    ]
    # Shrink the conversation so it fits the model's context window.
    _, messages = message_fit_in(messages, chat_mdl.max_length)
    answer = chat_mdl.chat(prompt, messages[1:], {"temperature": 0.2})
    # Some model wrappers return (text, token_count); keep just the text.
    answer = answer[0] if isinstance(answer, tuple) else answer
    # Drop any chain-of-thought block that precedes </think>.
    answer = re.sub(r"^.*</think>", "", answer, flags=re.DOTALL)
    return "" if "**ERROR**" in answer else answer


def full_question(tenant_id, llm_id, messages, language=None):
    """Rewrite the latest user question into a standalone, fully-specified one.

    Uses the conversation history so the refined question resolves pronouns
    and relative dates (e.g. "tomorrow" becomes an absolute ISO date).  When
    *language* is given, the output is forced into that language.  Falls back
    to the last message's content verbatim if the model reports an error.
    """
    from api.db.services.llm_service import LLMBundle

    # Multimodal models are registered as "image2text" and need the matching
    # bundle type; everything else goes through the plain chat bundle.
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    # Flatten the user/assistant turns into a plain-text transcript.
    conv = []
    for m in messages:
        if m["role"] not in ["user", "assistant"]:
            continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conv = "\n".join(conv)
    # Anchor dates so the model can convert relative references to absolute.
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - datetime.timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).isoformat()
    prompt = f"""
Role: A helpful assistant

Task and steps:
    1. Generate a full user question that would follow the conversation.
    2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.

Requirements & Restrictions:
  - If the user's latest question is completely, don't do anything, just return the original question.
  - DON'T generate anything except a refined question."""
    if language:
        prompt += f"""
  - Text generated MUST be in {language}."""
    else:
        prompt += """
  - Text generated MUST be in the same language of the original user's question.
"""
    prompt += f"""

######################
-Examples-
######################

# Example 1
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT:  Fred Trump.
USER: And his mother?
###############
Output: What's the name of Donald Trump's mother?

------------
# Example 2
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT:  Fred Trump.
USER: And his mother?
ASSISTANT:  Mary Trump.
User: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?

------------
# Example 3
## Conversation
USER: What's the weather today in London?
ASSISTANT:  Cloudy.
USER: What's about tomorrow in Rochester?
###############
Output: What's the weather in Rochester on {tomorrow}?

######################
# Real Data
## Conversation
{conv}
###############
    """
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
    # Drop any chain-of-thought block that precedes </think>.
    ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
    # On model error, return the original last message unchanged.
    return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]

def cross_languages(tenant_id, llm_id, query, languages=None):
    """Translate *query* into each language in *languages* via the chat model.

    Returns the translations joined by newlines, or the original *query*
    unchanged when the model reports an error.
    """
    from api.db.services.llm_service import LLMBundle

    # BUGFIX: avoid the mutable-default-argument pitfall (`languages=[]`
    # shares one list object across every call).
    if languages is None:
        languages = []

    if llm_id and llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)

    # Typo fixes below: "Jappanese" -> "Japanese", and the output-format
    # template listed "[language1 translation]" twice.
    sys_prompt = """
Act as a streamlined multilingual translator. Strictly output translations separated by ### without any explanations or formatting. Follow these rules:

1. Accept batch translation requests in format:
[source text]
=== 
[target languages separated by commas]

2. Always maintain:
- Original formatting (tables/lists/spacing)
- Technical terminology accuracy
- Cultural context appropriateness

3. Output format:
[language1 translation] 
### 
[language2 translation]

**Examples:**
Input:
Hello World! Let's discuss AI safety.
===
Chinese, French, Japanese

Output:
你好世界！让我们讨论人工智能安全问题。
###
Bonjour le monde ! Parlons de la sécurité de l'IA.
###
こんにちは世界！AIの安全性について話し合いましょう。
"""
    user_prompt = f"""
Input:
{query}
===
{', '.join(languages)}

Output:
"""

    ans = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.2})
    # Drop any chain-of-thought block that precedes </think>.
    ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
    if ans.find("**ERROR**") >= 0:
        return query
    # Strip the "Output:" echo and blank lines, then split on the separator.
    return "\n".join([a for a in re.sub(r"(^Output:|\n+)", "", ans, flags=re.DOTALL).split("===") if a.strip()])


def content_tagging(chat_mdl, content, all_tags, examples, topn=3):
    """Tag *content* with the *topn* most relevant tags from *all_tags*.

    *examples* is a list of dicts, each holding "content" text plus an
    already-assigned tag mapping under the TAG_FLD key; they are shown to the
    model as few-shot examples.  Returns a dict mapping tag -> integer
    relevance score (1-10).  Raises on model error or unrecoverable output.
    """
    prompt = f"""
Role: You're a text analyzer.

Task: Tag (put on some labels) to a given piece of text content based on the examples and the entire tag set.

Steps::
  - Comprehend the tag/label set.
  - Comprehend examples which all consist of both text content and assigned tags with relevance score in format of JSON.
  - Summarize the text content, and tag it with top {topn} most relevant tags from the set of tag/label and the corresponding relevance score.

Requirements
  - The tags MUST be from the tag set.
  - The output MUST be in JSON format only, the key is tag and the value is its relevance score.
  - The relevance score must be range from 1 to 10.
  - Keywords ONLY in output.

# TAG SET
{", ".join(all_tags)}

"""
    for i, ex in enumerate(examples):
        prompt += """
# Examples {}
### Text Content
{}

Output:
{}

        """.format(i, ex["content"], json.dumps(ex[TAG_FLD], indent=2, ensure_ascii=False))

    prompt += f"""
# Real Data
### Text Content
{content}

"""
    msg = [{"role": "system", "content": prompt}, {"role": "user", "content": "Output: "}]
    # Shrink the conversation so it fits the model's context window.
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.5})
    # Some model wrappers return (text, token_count); keep just the text.
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    # Drop any chain-of-thought block that precedes </think>.
    kwd = re.sub(r"^.*</think>", "", kwd, flags=re.DOTALL)
    if kwd.find("**ERROR**") >= 0:
        raise Exception(kwd)

    try:
        obj = json_repair.loads(kwd)
    except json_repair.JSONDecodeError:
        # BUGFIX: define `result` up front so the log line below cannot raise
        # NameError (masking the real failure) when the first replace() line
        # itself throws.
        result = ""
        try:
            # Salvage attempt: strip prompt echo / role markers, then extract
            # the first {...} object from the raw output.
            result = kwd.replace(prompt[:-1], "").replace("user", "").replace("model", "").strip()
            result = "{" + result.split("{")[1].split("}")[0] + "}"
            obj = json_repair.loads(result)
        except Exception as e:
            logging.exception(f"JSON parsing error: {result} -> {e}")
            raise e
    # Keep only entries whose score coerces cleanly to int.
    res = {}
    for k, v in obj.items():
        try:
            res[str(k)] = int(v)
        except Exception:
            pass
    return res


def vision_llm_describe_prompt(page=None) -> str:
    """Build the transcription prompt for a vision LLM reading a PDF page.

    When *page* is given, the model is additionally told to append a page
    divider for that page number at the end of its transcription.
    """
    sections = [
        """
INSTRUCTION:
Transcribe the content from the provided PDF page image into clean Markdown format.
- Only output the content transcribed from the image.
- Do NOT output this instruction or any other explanation.
- If the content is missing or you do not understand the input, return an empty string.

RULES:
1. Do NOT generate examples, demonstrations, or templates.
2. Do NOT output any extra text such as 'Example', 'Example Output', or similar.
3. Do NOT generate any tables, headings, or content that is not explicitly present in the image.
4. Transcribe content word-for-word. Do NOT modify, translate, or omit any content.
5. Do NOT explain Markdown or mention that you are using Markdown.
6. Do NOT wrap the output in ```markdown or ``` blocks.
7. Only apply Markdown structure to headings, paragraphs, lists, and tables, strictly based on the layout of the image. Do NOT create tables unless an actual table exists in the image.
8. Preserve the original language, information, and order exactly as shown in the image.
"""
    ]

    if page is not None:
        # Optional page divider appended to the transcription.
        sections.append(f"\nAt the end of the transcription, add the page divider: `--- Page {page} ---`.")

    sections.append(
        """
FAILURE HANDLING:
- If you do not detect valid content in the image, return an empty string.
"""
    )
    return "".join(sections)


def vision_llm_figure_describe_prompt() -> str:
    """Return the analysis prompt for describing charts/figures with a vision LLM."""
    return """
You are an expert visual data analyst. Analyze the image and provide a comprehensive description of its content. Focus on identifying the type of visual data representation (e.g., bar chart, pie chart, line graph, table, flowchart), its structure, and any text captions or labels included in the image.

Tasks:
1. Describe the overall structure of the visual representation. Specify if it is a chart, graph, table, or diagram.
2. Identify and extract any axes, legends, titles, or labels present in the image. Provide the exact text where available.
3. Extract the data points from the visual elements (e.g., bar heights, line graph coordinates, pie chart segments, table rows and columns).
4. Analyze and explain any trends, comparisons, or patterns shown in the data.
5. Capture any annotations, captions, or footnotes, and explain their relevance to the image.
6. Only include details that are explicitly present in the image. If an element (e.g., axis, legend, or caption) does not exist or is not visible, do not mention it.

Output format (include only sections relevant to the image content):
- Visual Type: [Type]
- Title: [Title text, if available]
- Axes / Legends / Labels: [Details, if available]
- Data Points: [Extracted data]
- Trends / Insights: [Analysis and interpretation]
- Captions / Annotations: [Text and relevance, if available]

Ensure high accuracy, clarity, and completeness in your analysis, and includes only the information present in the image. Avoid unnecessary statements about missing elements.
"""
