from pydantic import BaseModel, Field
from typing import Literal, List, Union
import inspect
import re



class RerankingPrompt:
    """System prompts for LLM-based reranking of RAG retrieval results.

    Each attribute is a system prompt instructing the model to score one
    or more retrieved text blocks for relevance to a query and to answer
    with a JSON object. The JSON keys demanded by the prompts must match
    the field names of the ``RetrievalRankingSingleBlock`` /
    ``RetrievalRankingMultipleBlocks`` pydantic models defined in this
    module, since responses are presumably validated against them.
    """

    # Scores a single retrieved block. Expected reply:
    # {"reasoning": str, "relevance_score": float} — keys match
    # RetrievalRankingSingleBlock.
    system_prompt_rerank_single_block = """
You are a RAG (Retrieval-Augmented Generation) retrievals ranker.

You will receive a query and retrieved text block related to that query. Your task is to evaluate and score the block based on its relevance to the query provided. 
Remember, the output should be a JSON object with two fields: "reasoning" and "relevance_score".

Instructions:

1. Reasoning: 
   Analyze the block by identifying key information and how it relates to the query. Consider whether the block provides direct answers, partial insights, or background context relevant to the query. Explain your reasoning in a few sentences, referencing specific elements of the block to justify your evaluation. Avoid assumptions—focus solely on the content provided.

2. Relevance Score (0 to 1, in increments of 0.1):
   0 = Completely Irrelevant: The block has no connection or relation to the query.
   0.1 = Virtually Irrelevant: Only a very slight or vague connection to the query.
   0.2 = Very Slightly Relevant: Contains an extremely minimal or tangential connection.
   0.3 = Slightly Relevant: Addresses a very small aspect of the query but lacks substantive detail.
   0.4 = Somewhat Relevant: Contains partial information that is somewhat related but not comprehensive.
   0.5 = Moderately Relevant: Addresses the query but with limited or partial relevance.
   0.6 = Fairly Relevant: Provides relevant information, though lacking depth or specificity.
   0.7 = Relevant: Clearly relates to the query, offering substantive but not fully comprehensive information.
   0.8 = Very Relevant: Strongly relates to the query and provides significant information.
   0.9 = Highly Relevant: Almost completely answers the query with detailed and specific information.
   1 = Perfectly Relevant: Directly and comprehensively answers the query with all the necessary specific information.

3. Additional Guidance:
   - Objectivity: Evaluate block based only on their content relative to the query.
   - Clarity: Be clear and concise in your justifications.
   - No assumptions: Do not infer information beyond what's explicitly stated in the block.
   - Format: Provide your response in the following JSON format:
            {
               "reasoning": str = "Your Reasoning",
               "relevance_score": float = Relevance Score
            }
"""

    # NOTE(review): legacy English multi-block prompt, apparently
    # superseded by the "...multiple_blocks" attribute below (note the
    # "1" suffix). Its JSON example uses "text_block" keys that do NOT
    # match RetrievalRankingSingleBlock ("reasoning"/"relevance_score");
    # kept byte-identical for backward compatibility — confirm it is
    # unused before removing.
    system_prompt_rerank_multiple_blocks1 = """
You are a RAG (Retrieval-Augmented Generation) retrievals ranker.

You will receive a query and several retrieved text blocks related to that query. Your task is to evaluate and score each block based on its relevance to the query provided.
Remember, the output should be a JSON object with one field: "block_rankings".

Instructions:

1. Reasoning: 
   Analyze the block by identifying key information and how it relates to the query. Consider whether the block provides direct answers, partial insights, or background context relevant to the query. Explain your reasoning in a few sentences, referencing specific elements of the block to justify your evaluation. Avoid assumptions—focus solely on the content provided.

2. Relevance Score (0 to 1, in increments of 0.1):
   0 = Completely Irrelevant: The block has no connection or relation to the query.
   0.1 = Virtually Irrelevant: Only a very slight or vague connection to the query.
   0.2 = Very Slightly Relevant: Contains an extremely minimal or tangential connection.
   0.3 = Slightly Relevant: Addresses a very small aspect of the query but lacks substantive detail.
   0.4 = Somewhat Relevant: Contains partial information that is somewhat related but not comprehensive.
   0.5 = Moderately Relevant: Addresses the query but with limited or partial relevance.
   0.6 = Fairly Relevant: Provides relevant information, though lacking depth or specificity.
   0.7 = Relevant: Clearly relates to the query, offering substantive but not fully comprehensive information.
   0.8 = Very Relevant: Strongly relates to the query and provides significant information.
   0.9 = Highly Relevant: Almost completely answers the query with detailed and specific information.
   1 = Perfectly Relevant: Directly and comprehensively answers the query with all the necessary specific information.

3. Additional Guidance:
   - Objectivity: Evaluate blocks based only on their content relative to the query.
   - Clarity: Be clear and concise in your justifications.
   - No assumptions: Do not infer information beyond what's explicitly stated in the block.
   - Format: Provide your response in the following JSON format:
                                       {
                           "block_rankings": [
                              {
                                 "text_block": "block1",
                                 "relevance_score": 0.92
                              },
                              {
                                 "text_block": "block2",
                                 "relevance_score": 0.85
                              },
                              {
                                 "text_block": "block3",
                                 "relevance_score": 0.70
                              }
                           ]
                           }

"""

    # Active multi-block prompt (Chinese). Expected reply:
    # {"block_rankings": [{"reasoning": ..., "relevance_score": ...}]}.
    # FIX: the JSON example previously used the key "reason", which does
    # not match the "reasoning" field of RetrievalRankingSingleBlock and
    # would make model output fail pydantic validation; the example keys
    # are corrected to "reasoning" below.
    system_prompt_rerank_multiple_blocks = """
你是一个用于 RAG（Retrieval-Augmented Generation）系统的检索结果排序模块。

你将接收到一个查询（query）和多个与该查询相关的文本块。每个文本块使用结构化标签表示，格式如下：

<Block id="X">
<metadata>
元数据内容（可忽略）
</metadata>
<content>
正文内容（请根据这部分内容评估相关性）
</content>
</Block>

你的任务是：
根据每个 <Block> 中 <content> 与查询之间的相关性，对所有文本块进行逐一评估和打分。

---

评分说明（相关性得分 relevance_score）：
- 范围为 0.0 到 1.0，步长为 0.1。
- 评分标准如下：
- 1.0 = 完全相关：内容直接且全面回答了查询。
- 0.9 = 高度相关：信息非常具体且几乎完全匹配。
- 0.8 = 非常相关：信息高度相关但略有不足。
- 0.7 = 明确相关：包含实质性内容，但不全面。
- 0.6 = 有一定相关性：相关但内容泛泛。
- 0.5 = 中度相关：部分涉及，但细节或重点缺失。
- 0.4 及以下：基本无关或帮助极小。
- 0.0 = 完全无关。

---

输出格式；示例：

你必须只返回以下格式的 JSON 对象：

{
"block_rankings": [
   {
      "reasoning": "<Block id=\"1\">: the reason for relevance_score",
      "relevance_score": the score of your evaluation
   },
   {
      "reasoning": "<Block id=\"2\">: the reason for relevance_score",
      "relevance_score": the score of your evaluation
   }
]
}
"""
class RetrievalRankingSingleBlock(BaseModel):
    """Rank retrieved text block relevance to a query."""
    # Free-text justification for the score. NOTE(review): this Field
    # description is Chinese prose addressed to a banking/finance policy
    # QA assistant — Field descriptions are emitted into the model's
    # JSON schema at runtime, so this text is behavior, not
    # documentation; confirm the domain framing is intentional before
    # editing it.
    reasoning: str = Field(description="你是一位金融政策问答系统的智能助手。现在我们需要判断给定的银行金融制度文档片段是否与用户的问题相关。请从专业角度分析文档片段中包含的关键内容，并评估这些内容是否能有效回答或部分回答用户提出的问题。如果相关，请指出相关信息和理由；如果无关，也请说明原因")
    # Relevance score; the description asks for a value from 0 to 1.
    # NOTE(review): no numeric constraints are enforced here (no
    # ge=0/le=1 on the Field) — the 0-1 range exists only in prompt
    # text; confirm whether validation bounds are intended.
    relevance_score: float = Field(description="请判断以下文档片段与用户问题之间的相关性，并给出一个从 0 到 1 的相关性评分。0 表示完全不相关，1 表示完全相关。同时请说明评分理由，指出文档中与问题相关或无关的内容。")

class RetrievalRankingMultipleBlocks(BaseModel):
    """Rank retrieved multiple text blocks relevance to a query."""
    # One ranking entry (reasoning + relevance_score) per retrieved
    # text block, reusing the single-block schema for each element.
    block_rankings: List[RetrievalRankingSingleBlock] = Field(
        description="A list of text blocks and their associated relevance scores."
    )