import argparse
import hashlib
import random
import re
import time
from concurrent.futures import ThreadPoolExecutor  # 新增导入
from typing import List, Dict

import requests
from sentence_transformers import SentenceTransformer
from zhipuai import ZhipuAI

# Configuration
MODEL = "glm-4-plus"
client = ZhipuAI(api_key='your api key')  # placeholder — supply a real API key before running
BASE_URL = "http://180.184.65.98:38880/atomgit"  # paper-search service endpoint
embedder = SentenceTransformer('all-MiniLM-L6-v2')  # NOTE(review): loaded but never used in this file — verify before removing


# Zhipu LLM invocation helper
def call_zhipu_ai(prompt: str) -> str:
    """Send *prompt* to the Zhipu chat-completion API and return the reply text.

    Best-effort: any failure is logged and an empty string is returned so
    callers can degrade gracefully.
    """
    system_message = {
        "role": "system",
        "content": "You are an academic writer. Do not include any 'References' or 'Papers analyzed' section in the content.",
    }
    user_message = {"role": "user", "content": prompt}
    try:
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[system_message, user_message],
            temperature=0.2,
            max_tokens=4000,
        )
        reply = completion.choices[0].message.content
        print(f"API返回内容长度：{len(reply)}字符")
        return reply
    except Exception as exc:  # broad on purpose: treat any API failure as "no content"
        print(f"ZhipuAI call failed: {str(exc)}")
        return ""


# Paper search API client
def search_papers(query: str, top_k: int = 50) -> List[Dict]:
    """Query the paper-search service and return the hit list ([] on any failure)."""
    endpoint = f"{BASE_URL}/search_papers"
    try:
        resp = requests.get(
            endpoint,
            params={"query": query, "top_k": top_k},
            timeout=10,
        )
        if resp.status_code != 200:
            print(f"API Error: {resp.text}")
            return []
        return resp.json()
    except Exception as exc:
        print(f"Request Error: {str(exc)}")
        return []


# Automatically detect the review_type
def detect_review_type(query: str) -> str:
    """Classify *query* into one review type: concept / status / comparison / evolution.

    Asks the LLM with a few-shot style prompt, then extracts the first valid
    label from the (lower-cased) reply; returns 'unknown' when none is found.
    """
    prompt = f"""根据技术文献综述的分类标准，判断查询主题所属类型：

    **分类定义**
    - concept: 对技术概念的定义、组成、原理的调研（如'图神经网络基本原理'）
    - status: 领域研究现状分析，包含最新进展、瓶颈问题（如'自动驾驶感知技术最新进展'）
    - comparison: 多个方法/模型的对比，需包含性能指标对比（如'目标检测模型YOLO vs Faster R-CNN'）
    - evolution: 技术发展脉络，需包含时间维度的演进分析（如'深度学习框架发展历程'）

    **当前查询**
    <<{query}>>

    **响应要求**
    1. 必须严格返回指定类型名称（全小写）
    2. 不要包含额外解释
    3. 若无法判断，返回'unknown'
    """

    answer = call_zhipu_ai(prompt).strip().lower()

    # Accept only an exact whole-word label, wherever it appears in the reply.
    valid_types = {"concept", "status", "comparison", "evolution"}
    label_re = re.compile(r"\b(" + "|".join(valid_types) + r")\b")
    hit = label_re.search(answer)
    if hit is None:
        return "unknown"
    return hit.group(0)

# Multi-agent class
# Multi-agent class
class Agent:
    """One role in the review pipeline.

    Roles: "retriever" (search papers), "analyzer" (extract per-chunk
    metadata), "writer" (generate the review body in parallel sections)
    and "validator" (length/citation sanity checks).
    """

    def __init__(self, role: str):
        self.role = role

    def process(self, task: str, data: Dict) -> Dict:
        """Dispatch to the handler for this agent's role; {} for unknown roles."""
        if self.role == "retriever":
            return self._retrieve(task, data)
        elif self.role == "analyzer":
            return self._analyze(task, data)
        elif self.role == "writer":
            return self._write(task, data)
        elif self.role == "validator":
            return self._validate(task, data)
        return {}

    def _retrieve(self, task: str, data: Dict) -> Dict:
        """Retrieve up to 50 document chunks for the query *task*."""
        docs = search_papers(task, top_k=50)
        return {"docs": docs, "query": task}

    def _analyze(self, task: str, data: Dict) -> Dict:
        """Extract key fields (title, chunk text, year, venue) from raw hits.

        Each hit is expected to carry an "entity" dict — presumably the search
        service's schema; missing fields fall back to placeholder values.
        """
        docs = data["docs"]
        analysis = []
        for idx, doc in enumerate(docs, 1):
            entity = doc.get("entity", {})
            # The venue is encoded as the 5th "_"-separated token of the
            # original filename, with a trailing 4-digit year stripped
            # (e.g. "ICML2020" -> "ICML").
            name_parts = entity.get("original_filename", "").split("_")
            venue = re.sub(r"\d{4}$", "", name_parts[4]) if len(name_parts) > 4 else ""
            analysis.append({
                "index": idx,
                "paper_id": entity.get("paper_id", "unknown"),
                "title": entity.get("paper_title", "No Title"),
                "chunk_id": entity.get("chunk_id", "unknown_chunk"),
                "content": entity.get("chunk_text", "No Content"),
                "year": entity.get("year", "Unknown Year"),
                "venue": venue,
            })
        return {"analysis": analysis, "docs": docs}

    def _write_section(self, task: str, section_title: str, analysis: List[Dict], start_idx: int, end_idx: int) -> str:
        """Generate one "### {section_title}" section from a slice of the analysis.

        Post-processes the model output: enforces exactly one heading, converts
        "[n]" / "文献[n]" citation markers to <sup>n</sup>, and pads content
        shorter than 600 characters.
        """
        chunks = analysis[start_idx:end_idx]
        prompt = f"请为主题‘{task}’生成一节学术综述，标题为‘### {section_title}’，字符数不少于600字。\n"
        prompt += "文献数据（请基于这些内容生成）：\n"
        for item in chunks:
            prompt += f"[{item['index']}] 标题: {item['title']} (Paper ID: {item['paper_id']}, Chunk ID: {item['chunk_id']}, Year: {item['year']})\n"
            prompt += f"   内容片段: {item['content'][:500]}...\n\n"
        # FIX: several lines below were plain strings, so '{section_title}' and
        # '{task}' reached the model as literal braces, and the citation rule
        # hard-coded "5" papers while each section actually receives a
        # 10-document slice. They are now f-strings using the real values.
        prompt += "要求：\n" \
                  "- 语言学术化，逻辑清晰，提供技术细节（如算法流程或数学公式）和研究意义。\n" \
                  "- 字符数必须不少于600字，深入分析文献内容，必要时补充实验数据或指标。\n" \
                  f"- 正文以‘### {section_title}’开头，直接嵌入标题。\n" \
                  "- 每段必须标注引用，格式为：<sup>index</sup>，index 与文献编号对应。\n" \
                  f"- 必须引用所有提供的{len(chunks)}篇文献，每篇至少提及一次，嵌入正文为 <sup>index</sup>，引用需基于文献标题和内容片段进行详细分析（如技术方法、实验结果或应用场景），与段落主题紧密相关，避免泛泛而谈或简单罗列。\n" \
                  "- 每段至少包含2个具体引用，分析其技术贡献或实验成果（如算法细节、数据集性能、指标提升）。\n" \
                  f"- 生成内容必须严格围绕主题‘{task}’，不得引入无关主题（如‘提升大模型规划能力’等其他任务）。\n"
        if section_title == "4. 对比分析":
            prompt += f"- 在本节中插入一个Markdown格式的表格，标题为‘{task}相关方法对比’，对比至少5种方法，列包括：方法名称、技术原理、核心创新点、性能指标、优缺点。表格内容基于提供的文献数据生成。\n"
        elif section_title == "3. 应用场景":
            prompt += f"- 在本节中插入一个Markdown格式的表格，标题为‘{task}相关方法对比’，对比至少5种方法，列包括：方法名称、技术原理、数据集、性能指标、应用场景。表格内容基于提供的文献数据生成。\n"

        prompt += "- 章节间添加过渡句（如‘尽管应用广泛，以下挑战仍待解决’）。\n" \
                  "- 若字数不足600字，扩展分析内容，确保达标。\n" \
                  "- 不要添加参考文献列表。"

        content = call_zhipu_ai(prompt)

        # Normalize the heading: strip a duplicated title, prepend a missing one.
        expected_title = f"### {section_title}"
        if content.startswith(expected_title + "\n" + expected_title):
            content = content[len(expected_title) + 1:]
        elif not content.startswith(expected_title):
            content = f"{expected_title}\n{content}"

        # Convert citation markers to <sup>n</sup> tags; "[n]" is tried first,
        # which also rewrites "文献[n]" occurrences, so the variant rarely fires.
        used_indices = set()
        for item in chunks:
            for marker in (r"\[{}\]".format(item["index"]), r"文献\[{}\]".format(item["index"])):
                if re.search(marker, content):
                    content = re.sub(marker, f"<sup>{item['index']}</sup>", content)
                    used_indices.add(item["index"])

        if len(content) < 600:
            print(f"{section_title} 字数不足（{len(content)} < 600），正在扩展...")
            content += f"\n此外，该领域可进一步研究{task}与多模态数据的深度融合，以提升模型性能。"

        print(f"生成章节：{section_title}，长度：{len(content)}字符，引用数：{len(used_indices)}，首行：{content[:50]}...")
        return content + "\n"

    def _write(self, task: str, data: Dict) -> Dict:
        """Generate the full review body, one section per thread, in outline order."""
        analysis = data["analysis"]
        review_type = data.get("review_type", "concept")
        structure = self._get_structure(review_type).split("\n")

        # Each section draws on a distinct 10-document slice of the analysis.
        section_indices = [(0, 10), (10, 20), (20, 30), (30, 40), (40, 50)]

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = []
            for idx, (start_idx, end_idx) in enumerate(section_indices):
                # The outline may have fewer than 5 sections; skip surplus slices.
                if idx < len(structure) and structure[idx].strip():
                    section_title = structure[idx].strip()  # e.g. "1. 引言"
                    futures.append(
                        executor.submit(self._write_section, task, section_title, analysis, start_idx, end_idx))

            # Collect in submission order so section order matches the outline.
            content = "".join(future.result() for future in futures)

        return {"content": content, "references": analysis}

    def _validate(self, task: str, data: Dict) -> Dict:
        """Check character count, reference count and inline-citation count."""
        content = data["content"]
        refs = data["references"]
        char_count = len(content)
        ref_count = len(refs)
        sup_count = len(re.findall(r"<sup>\d+</sup>", content))
        validation = {
            "char_count": char_count,
            "ref_count": ref_count,
            "sup_count": sup_count,
            # Minimum thresholds for an acceptable review.
            "is_valid": char_count >= 3000 and ref_count >= 50 and sup_count >= 15,
        }
        return {"content": content, "validation": validation, "references": refs}

    def _get_structure(self, review_type: str) -> str:
        """Return the newline-separated section outline for *review_type*."""
        outlines = {
            "concept": "1. 引言\n2. 定义与背景\n3. 应用场景\n4. 技术挑战\n5. 结论",
            "status": "1. 引言\n2. 研究现状\n3. 主要挑战\n4. 未来方向",
            "comparison": "1. 引言\n2. 技术背景\n3. 相关方法\n4. 对比分析\n5. 结论与展望",
            "evolution": "1. 引言\n2. 早期阶段\n3. 发展历程\n4. 现代趋势\n5. 未来展望",
        }
        return outlines.get(review_type, "1. 引言\n2. 正文\n3. 结论")


# Main pipeline
# Main pipeline
class ReviewGenerator:
    """Orchestrates the retrieve -> analyze -> write -> validate pipeline."""

    def __init__(self):
        self.agents = {
            "retriever": Agent("retriever"),
            "analyzer": Agent("analyzer"),
            "writer": Agent("writer"),
            "validator": Agent("validator")
        }

    def generate_review(self, query: str, review_type: str = None) -> str:
        """Generate a structured literature review for *query*.

        When *review_type* is None it is detected automatically via the LLM.
        Returns the review body followed by a formatted reference list, or a
        failure message when retrieval yields no documents.
        """
        start_time = time.time()

        # 0. Determine the review type
        if not review_type:
            type_start = time.time()
            review_type = detect_review_type(query)
            print(f"自动匹配综述类型：{review_type}，耗时：{time.time() - type_start:.2f}秒")
        else:
            print(f"使用指定综述类型：{review_type}，耗时：0.00秒")

        # 1. Retrieve. FIX: the retriever builds and returns a fresh dict, so
        # the review_type previously seeded into `data` here was silently
        # discarded; it is now set once, just before the writer step.
        retrieve_start = time.time()
        data = self.agents["retriever"].process(query, {})
        print(f"步骤1 - 检索耗时：{time.time() - retrieve_start:.2f}秒")
        if not data["docs"]:
            return "检索失败，无法生成综述"

        # 2. Analyze
        analyze_start = time.time()
        data = self.agents["analyzer"].process(query, data)
        print(f"步骤2 - 分析耗时：{time.time() - analyze_start:.2f}秒")

        # 3. Write (the writer reads "review_type" to pick the outline)
        data["review_type"] = review_type
        write_start = time.time()
        data = self.agents["writer"].process(query, data)
        print(f"步骤3 - 写作耗时：{time.time() - write_start:.2f}秒")

        # 4. Validate
        validate_start = time.time()
        data = self.agents["validator"].process(query, data)
        print(f"步骤4 - 校验耗时：{time.time() - validate_start:.2f}秒")

        # 5. Format the final output (body + reference list)
        format_start = time.time()
        content = data["content"]
        refs = data["references"]

        ref_section = "\n## 参考文献\n"
        for ref in refs:
            ref_section += f"[{ref['index']}] {ref['title']}. {ref['venue']},{ref['year']}, chunk {ref['chunk_id']}\n\n"

        output = f"{content}\n{ref_section}"
        print(f"步骤5 - 格式化输出耗时：{time.time() - format_start:.2f}秒")

        print(f"总耗时：{time.time() - start_time:.2f}秒")
        return output


# Stopwatch timer class
# Stopwatch timer class
class Stopwatch:
    """Simple wall-clock stopwatch with console reporting."""

    def __init__(self):
        self._start_time = None

    def start(self):
        """Record the current time as the starting point."""
        self._start_time = time.time()
        print("计时开始...")

    def stop(self):
        """Print and return the elapsed seconds; None (with a notice) if never started."""
        if self._start_time is None:
            print("秒表未启动。")
            return None
        elapsed = time.time() - self._start_time
        print(f"总耗时: {elapsed:.2f} 秒")
        return elapsed


# MD5 checksum helper
# MD5 checksum helper
def calculate_file_hash(file_path):
    """Return the hex MD5 digest of the file at *file_path*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(file_path, "rb") as fh:
        while True:
            block = fh.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()


# Example entry point
# Example entry point
def main():
    """CLI entry point: generate a review for --topic and save it as Markdown."""
    parser = argparse.ArgumentParser(description="Literature Review Generator")
    parser.add_argument("--topic", type=str, required=True, help="Review topic")
    topic = parser.parse_args().topic

    timer = Stopwatch()
    timer.start()

    print(f"\n生成 {topic} 的综述...\n")
    review_text = ReviewGenerator().generate_review(topic)

    out_path = f"{topic}_review.md"
    try:
        with open(out_path, "w", encoding="utf-8") as out_file:
            out_file.write(f"# {topic}\n\n")
            out_file.write(review_text)

        # Report the MD5 checksum of the generated article.
        print(f"文章的MD5码: {calculate_file_hash(out_path)}")
    except IOError as err:
        print(f"文件操作发生错误: {err}")
    finally:
        timer.stop()


if __name__ == "__main__":
    main()