import re
from prompts.query_classify_prompt import QUERY_CLASSIFY_PROMPT_SYSTEM
from my_utils.llm_client import llm_chat

class ProblemRewriter:
    """Rewrite a user's raw survey requirement into retrieval artifacts.

    Asks an LLM to produce three Chinese technical keywords plus five
    English vector-search queries in a strict line format, then parses
    that reply into a flat list of strings.
    """

    def __init__(self):
        # The prompt pins the exact output format ("Keywords: ..." then
        # "Query1:".."Query5:"); _parse_response depends on this format.
        self.rewrite_prompt_template = """
任务：根据用户提供的“原始需求”和“综述框架类别”，执行以下操作：
1.  深入分析用户的核心技术研究意图或检索目标。
2.  提取3个最能代表该核心意图的中文技术关键词，关键词之间用**单个空格**分隔。这些关键词应聚焦于具体技术、算法、模型、概念或方法，避免宽泛描述。
3.  基于分析出的核心意图，生成5个不同的、简洁且精确的**英文**查询语句。这些语句应适合在向量数据库中进行检索。查询语句应具有多样性，可以从不同角度切入核心意图。
4.  严格按照以下格式输出，不要包含任何额外的解释、前缀或编号外的标记：

Keywords: keyword1 keyword2 keyword3
Query1: 生成的第一个英文查询语句
Query2: 生成的第二个英文查询语句
Query3: 生成的第三个英文查询语句
Query4: 生成的第四个英文查询语句
Query5: 生成的第五个英文查询语句

---
输入信息：
原始需求：{original_requirement}
综述框架类别：{classification}
---

请立即开始生成结果：
"""

    def rewrite(self, original_requirement: str, classification: str) -> list[str]:
        """Rewrite the requirement into keywords and search queries.

        Args:
            original_requirement: The user's raw request text.
            classification: Survey-framework category fed into the prompt.

        Returns:
            A list whose first element (when extraction succeeds) is the
            space-separated keyword string, followed by up to five English
            query strings. Empty list on LLM failure, unparseable output,
            or any exception (errors are printed, never raised).
        """
        prompt = self.rewrite_prompt_template.format(
            original_requirement=original_requirement,
            classification=classification
        )

        try:
            response = llm_chat(
                messages=[{"role": "user", "content": prompt}],
                model_name="GLM_4_PLUS",
                # Low temperature: format adherence matters more than variety.
                temperature=0.1,
                max_tokens=500
            )

            # Guard clause: bail out early on a missing/invalid response shape.
            if not (response and response.choices and response.choices[0].message):
                print("LLM调用失败或返回了无效的响应结构。")
                return []

            content = response.choices[0].message.content.strip()
            results = self._parse_response(content)

            if not results:
                # Neither keywords nor queries could be extracted.
                print(f"LLM响应格式不符合预期，无法解析。\n响应内容:\n{content}")
            return results

        except Exception as e:
            # Best-effort contract: never raise to the caller, report and
            # return an empty result instead.
            print(f"处理重写请求时发生异常: {e}")
            return []

    @staticmethod
    def _parse_response(content: str) -> list[str]:
        """Parse 'Keywords:' and 'QueryN:' lines out of the LLM reply.

        Returns the keyword string (if any) followed by the non-empty
        query strings; missing pieces are reported via print.
        """
        results: list[str] = []

        # 1. The keyword line: everything after "Keywords:" at line start.
        keywords_match = re.search(r"^Keywords:\s*(.*)", content, re.MULTILINE)
        if keywords_match:
            keywords = keywords_match.group(1).strip()
            if keywords:
                results.append(keywords)
            else:
                print("LLM返回了'Keywords:'标签，但内容为空。")
        else:
            print("无法从LLM响应中提取关键词。")

        # 2. All "QueryN:" lines, in order of appearance.
        query_matches = re.findall(r"^Query\d+:\s*(.*)", content, re.MULTILINE)
        if query_matches:
            results.extend(q.strip() for q in query_matches if q.strip())
        else:
            print("无法从LLM响应中提取任何查询语句。")

        return results


class RequirementClassifier:
    """Classify a survey requirement into one of four types ('a'-'d') via LLM."""

    def __init__(self):
        # Maximum LLM attempts before classify() gives up and returns None.
        self.max_retry_times = 5

    def classify(self, user_requirement):
        """Return the model's choice 'a'..'d', or None after exhausting retries.

        A one-shot example teaches the model to answer with
        <think>...</think> reasoning followed by an <anwser>x</anwser> tag
        (the 'anwser' misspelling is deliberate and must match the parser).
        After each unparseable reply the temperature is raised by 0.1
        (clamped to 1) to coax a differently formatted answer.
        """
        messages = [
            {"role": "system", "content": QUERY_CLASSIFY_PROMPT_SYSTEM},
            {"role": "user", "content": "What are the current challenges in the field of Text2SQL?"},
            {"role": "assistant", "content": '<think>The user is asking about "current challenges" in "Text2SQL," which is a specific research area. This indicates a focus on the present state and problems within that field, aligning with a State-of-the-Art Review.</think>\n<anwser>b</anwser>'},
            {"role": "user", "content": user_requirement}
        ]

        retry_times = 0
        temperature = 0.0
        while retry_times < self.max_retry_times:
            response = llm_chat(
                messages=messages,
                model_name="GLM_4_PLUS",
                # Clamp: repeated +0.1 bumps must not exceed the API's max of 1.
                temperature=min(temperature, 1)
            )
            if response:
                content = response.choices[0].message.content.strip().lower()
                choice = self.extract_and_validate_response(content)
                if choice:
                    return choice
                # Unparseable reply: retry hotter.
                temperature += 0.1
                retry_times += 1
            else:
                retry_times += 1
                print(f"模型未返回响应，即将重试第 {retry_times} 次...")
        return None

    def extract_and_validate_response(self, response_content):
        """Extract the single-letter answer from an <anwser>x</anwser> tag.

        Returns 'a'..'d' on success, or '' when the tag is missing or its
        content is not a valid choice. NOTE: the tag name 'anwser' is
        misspelled on purpose — it mirrors the few-shot example sent to the
        model in classify(), so do not "fix" the spelling here alone.
        """
        try:
            # The character class [a-d] already restricts the capture, so a
            # successful match is a valid choice by construction (the old
            # separate membership check was unreachable and has been dropped).
            answer_match = re.search(r"<anwser>([a-d])</anwser>", response_content)
            if answer_match:
                return answer_match.group(1)
            print(f"模型返回格式错误:\n {response_content}")
            return ""
        except Exception as e:
            print(f"提取并验证标签过程中发生错误: {e}")
            return ""


def paper_type_classfier(user_requirement):
    """Map the user's requirement to a survey-type description string.

    Runs the LLM classifier; on failure (None result) falls back to
    category 'a'. Returns the English description of the category.
    """
    classifier = RequirementClassifier()
    print("正在进行综述类型识别！")

    # Classify the requirement; default to 'a' when the classifier gives up.
    label = classifier.classify(user_requirement) or 'a'

    label_to_description = {
        'a': "A survey and review of a technical concept",
        'b': "A review of the current research status in a specific field",
        'c': "A comparative analysis and review of multiple methods",
        'd': "A review of the research lineage of a technical method",
    }
    description = label_to_description.get(label, '未知类别')
    print(f"需求分类结果：{label} - {description}")

    return description


def paper_query_rewrite(user_requirement, classification):
    """Rewrite the requirement into keywords plus English search queries."""
    return ProblemRewriter().rewrite(user_requirement, classification)


if __name__ == "__main__":
    # Other sample requirements to try:
    # "Latest Advances and Cross-modal Fusion Strategies in Multimodal Learning"
    # "Comparative Analysis of Instruction Tuning Methods for Large Language Models"
    # "Review of the Development Trajectory of Object Detection Algorithms"

    user_requirement = "Retrieval augmented generation and Similarity Search"
    # Renamed from `type`, which shadowed the builtin of the same name.
    survey_type = paper_type_classfier(user_requirement)
