# 导入必要的库和模块
from fastapi import APIRouter, Request, HTTPException
from fastapi.responses import StreamingResponse
from dotenv import load_dotenv
from app.services.github_service import GitHubService
from app.services.deepseek_service import DeepSeekService
from app.prompts import (
    SYSTEM_FIRST_PROMPT,
    SYSTEM_SECOND_PROMPT,
    SYSTEM_THIRD_PROMPT,
    ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT,
    NO_README_INSTRUCTIONS,
)
from anthropic._exceptions import RateLimitError
from pydantic import BaseModel
from functools import lru_cache
import re
import json
import asyncio

load_dotenv()

# Startup diagnostic: confirms this router module was imported.
print("========== 初始化 DeepSeek 生成路由 ==========")

# All routes in this module are mounted under /deepseek/mermaid.
router = APIRouter(prefix="/deepseek/mermaid", tags=["DeepSeek"])

# Module-level DeepSeek service instance, shared by the /cost and /stream routes.
deepseek_service = DeepSeekService()
print("已初始化 DeepSeekService 服务")


# Cache GitHub data to avoid duplicate API calls when calculating cost and generating diagrams
@lru_cache(maxsize=100)
def get_cached_github_data(username: str, repo: str, github_pat: str | None = None):
    """Fetch and memoize the GitHub metadata needed for diagram generation.

    Results are cached (LRU, up to 100 entries) so the /cost and /stream
    routes don't repeat identical GitHub API calls. A fresh GitHubService
    is constructed on every (uncached) call so the supplied PAT is honored.

    Args:
        username: GitHub user or organization name.
        repo: Repository name.
        github_pat: Optional GitHub personal access token.

    Returns:
        dict with keys "default_branch", "file_tree", and "readme".
    """
    gh = GitHubService(pat=github_pat)

    # Fall back to "main" when the default branch cannot be resolved.
    branch = gh.get_default_branch(username, repo) or "main"

    return {
        "default_branch": branch,
        "file_tree": gh.get_github_file_paths_as_list(username, repo),
        "readme": gh.get_github_readme(username, repo),
    }


# Define the API request body model
class ApiRequest(BaseModel):
    """Request body shared by the DeepSeek diagram endpoints.

    Attributes:
        username: GitHub user or organization name.
        repo: Repository name.
        instructions: Optional custom instructions (the /stream route
            rejects bodies longer than 1000 characters).
        api_key: Optional user-supplied DeepSeek API key.
        github_pat: Optional GitHub personal access token for private repos.
        model: DeepSeek model identifier; defaults to "deepseek-chat".
    """
    username: str
    repo: str
    instructions: str = ""
    api_key: str | None = None
    github_pat: str | None = None
    model: str = "deepseek-chat"


# Define the route for cost calculation
@router.post("/cost")
async def get_generation_cost(request: Request, body: ApiRequest):
    """Estimate the DeepSeek API cost of generating a diagram.

    Counts the tokens in the repository's file tree and README (via the
    cached GitHub data) and applies approximate per-token pricing for
    input plus a fixed output-token allowance.

    Args:
        request: Incoming FastAPI request (unused; kept for route signature).
        body: Repository coordinates plus optional credentials.

    Returns:
        dict: {"cost": "<dollar string>"} on success, {"error": "<msg>"} on failure.
    """
    print(f"[DeepSeek] 成本计算请求: 用户名={body.username}, 仓库={body.repo}, 模型={body.model}")
    try:
        # Reuse cached GitHub data so /cost and /stream don't double-fetch.
        cached = get_cached_github_data(body.username, body.repo, body.github_pat)

        file_tree_tokens = deepseek_service.count_tokens(cached["file_tree"])
        readme_tokens = deepseek_service.count_tokens(cached["readme"])
        print(f"[DeepSeek] 令牌计数: 文件树={file_tree_tokens}, README={readme_tokens}")

        # Approximate DeepSeek-Chat pricing: $0.5 per 1M input tokens and
        # $1.5 per 1M output tokens, assuming ~8000 output tokens. The file
        # tree is weighted twice (used in two prompts) plus prompt overhead.
        prompt_tokens = file_tree_tokens * 2 + readme_tokens + 3000
        estimated_cost = prompt_tokens * 0.0000005 + 8000 * 0.0000015

        # Format as a dollar string with 2 decimal places.
        cost_string = f"${estimated_cost:.2f} USD"
        print(f"[DeepSeek] 成本计算结果: {cost_string}")
        return {"cost": cost_string}
    except Exception as e:
        print(f"[DeepSeek] 成本计算错误: {str(e)}")
        return {"error": str(e)}


# Process click events in Mermaid diagrams, adding GitHub URLs
def process_click_events(diagram: str, username: str, repo: str, branch: str) -> str:
    """
    Rewrite Mermaid click events so their paths become full GitHub URLs.

    Each clicked path is classified as a file (its last segment contains a
    dot) or a directory, selecting the matching GitHub URL form ("blob" for
    files, "tree" for directories). Paths that are already absolute URLs
    are left untouched instead of being nested under github.com.

    Args:
        diagram (str): Mermaid diagram source.
        username (str): GitHub user or organization name.
        repo (str): Repository name.
        branch (str): Branch name to use in the generated URLs.

    Returns:
        str: Diagram source with click targets expanded to full GitHub URLs.
    """

    def replace_path(match: "re.Match[str]") -> str:
        # The regex captures the quoted target without its quotes; strip
        # defensively in case the model emitted extra quoting.
        path = match.group(2).strip("\"'")

        # Bug fix: don't mangle targets that are already absolute URLs.
        if path.startswith(("http://", "https://")):
            return f'click {match.group(1)} "{path}"'

        # Heuristic: a final segment containing a dot is treated as a file.
        is_file = "." in path.split("/")[-1]

        # "blob" renders file contents; "tree" browses a directory.
        path_type = "blob" if is_file else "tree"
        full_url = f"https://github.com/{username}/{repo}/{path_type}/{branch}/{path}"

        # Return the click event with the completed URL.
        return f'click {match.group(1)} "{full_url}"'

    # Matches: click <node> "<path>"
    click_pattern = r'click ([^\s"]+)\s+"([^"]+)"'
    # Replace every click target, converting relative paths to GitHub URLs.
    return re.sub(click_pattern, replace_path, diagram)


# Mermaid syntax clean-up helper
def sanitize_mermaid_diagram(diagram: str) -> str:
    """
    Fix common syntax problems in model-generated Mermaid diagram code.

    Cleanups performed:
    - strip Markdown code-fence markers;
    - normalize the header to ``flowchart`` (preferring it over ``graph``)
      and drop any leading junk before the header;
    - remove blank lines;
    - normalize malformed arrows (``---->``, ``- ->``, `` ->``) to ``-->``;
    - rewrite ``subgraph ID "Title"`` to the valid ``subgraph ID["Title"]``
      form (keeping the ID, which edges may reference);
    - normalize quoted edge labels and quote node labels that contain
      brackets or parentheses.

    Args:
        diagram (str): Raw Mermaid diagram code.

    Returns:
        str: Sanitized Mermaid diagram code.
    """
    # 1. Remove any Markdown code-block fences.
    diagram = diagram.replace("```mermaid", "").replace("```", "").strip()

    # 2. Prefer the "flowchart" header over the legacy "graph" header.
    if diagram.strip().startswith("graph "):
        diagram = diagram.replace("graph ", "flowchart ", 1)

    # 3. Ensure the diagram starts at its actual header, dropping preamble text.
    if not (diagram.strip().startswith("flowchart ") or diagram.strip().startswith("graph ")):
        flowchart_index = diagram.find("flowchart ")
        graph_index = diagram.find("graph ")

        if flowchart_index > -1:
            diagram = diagram[flowchart_index:]
        elif graph_index > -1:
            # Convert to flowchart syntax.
            diagram = "flowchart " + diagram[graph_index + 6:]

    # 4. Drop blank lines.
    diagram = "\n".join([line for line in diagram.split("\n") if line.strip()])

    # Normalize over-long arrows. A single regex replaces the old chain of
    # str.replace calls, whose ordering collapsed "----->" to "--->" instead
    # of "-->" (the 4-dash pattern matched inside the 5-dash arrow first).
    diagram = re.sub(r"-{3,}>", "-->", diagram)
    diagram = diagram.replace("- ->", "-->")
    diagram = diagram.replace(" ->", "-->")

    # Fix subgraph declarations: `subgraph ID "Title"` is invalid Mermaid.
    # Keep the ID (edges may reference it) and attach the title as a label.
    subgraph_pattern = r'subgraph\s+([A-Za-z0-9_-]+)\s+"([^"]+)"'
    diagram = re.sub(subgraph_pattern, r'subgraph \1["\2"]', diagram)

    # Remove stray spaces around quoted edge-relation labels.
    edge_label_pattern = r'-->(\s*)\|(\s*)"([^"]+)"(\s*)\|'
    diagram = re.sub(edge_label_pattern, r'-->|"\3"|', diagram)

    # Quote node labels that contain brackets/parentheses (which would
    # otherwise break Mermaid parsing).
    node_label_pattern = r'\[([^"\]]+[\(\)\[\]]+[^\]"]*)\]'
    diagram = re.sub(node_label_pattern, r'["\1"]', diagram)

    return diagram


# Define the route for streaming diagram generation
@router.post("/stream")
async def generate_stream(request: Request, body: ApiRequest):
    """
    Stream the generation of a system-design diagram (DeepSeek variant).

    Core flow of the DeepSeek version of GitDiagram. Runs a three-phase
    pipeline:
    1. Phase one: generate an explanation of the project architecture.
    2. Phase two: create a component-to-path mapping.
    3. Phase three: generate the Mermaid diagram.

    Progress and results are streamed back as server-sent events.
    Repositories without a README are supported; analysis then relies on
    the file tree alone.

    Args:
        request (Request): FastAPI request object.
        body (ApiRequest): Repository info plus optional custom instructions.

    Returns:
        StreamingResponse: SSE stream of progress events and the final result.
    """
    print(f"[DeepSeek] 生成请求: 用户名={body.username}, 仓库={body.repo}, 是否有API密钥={bool(body.api_key)}")

    # Reject over-long custom instructions up front.
    if len(body.instructions) > 1000:
        print("[DeepSeek] 错误: 指令长度超过限制")
        return {"error": "Instructions exceed maximum length of 1000 characters"}

    try:
        # Async generator emitting SSE "data:" frames.
        async def event_generator():
            # Create the GitHubService instance with the provided PAT
            current_github_service = GitHubService(pat=body.github_pat)

            try:
                # Get repository information
                default_branch = current_github_service.get_default_branch(body.username, body.repo)
                if not default_branch:
                    default_branch = "main"  # Default to "main" if retrieval fails

                file_tree = current_github_service.get_github_file_paths_as_list(body.username, body.repo)
                readme = current_github_service.get_github_readme(body.username, body.repo)

                # Track whether a README exists.
                has_readme = len(readme.strip()) > 0
                if not has_readme:
                    print(f"[DeepSeek] 注意: 仓库 {body.username}/{body.repo} 没有README文件，将仅使用文件树进行分析")
                    # Keep readme a string so token counting below can't fail.
                    readme = ""

                print(f"[DeepSeek] 已获取GitHub数据: 默认分支={default_branch}, 文件树长度={len(file_tree)}, 有README={has_readme}")

                if not file_tree:
                    print("[DeepSeek] 错误: 无法获取仓库文件树")
                    yield f"data: {json.dumps({'status': 'error', 'error': 'Failed to retrieve repository file tree. Please check the repository name and ensure it exists.'})}\n\n"
                    return

                # Send the initial status event.
                yield f"data: {json.dumps({'status': 'explanation_sent', 'message': 'Analyzing repository structure...'})}\n\n"
                await asyncio.sleep(0.1)  # Small delay for frontend to process

                # Check the combined token count against the context window.
                combined_content = f"{file_tree}\n{readme}"
                token_count = deepseek_service.count_tokens(combined_content)
                print(f"[DeepSeek] Token count: {token_count}")

                # If the count exceeds DeepSeek's context-window budget, trim the file tree.
                if token_count > 60000:
                    # Split the file tree into lines.
                    file_tree_lines = file_tree.split('\n')
                    # Drop files until the token count is within the safe range.
                    while token_count > 60000 and len(file_tree_lines) > 100:
                        # Remove ~20% of the remaining files each pass.
                        reduction = max(int(len(file_tree_lines) * 0.2), 50)
                        file_tree_lines = file_tree_lines[:-reduction]
                        # Note how many files were trimmed.
                        file_tree_lines.append(f"... (and {reduction} more files not shown due to token limit)")
                        # Recount tokens. The join is hoisted out of the
                        # f-string: a backslash inside an f-string expression
                        # is a SyntaxError before Python 3.12 (PEP 701).
                        trimmed_tree = '\n'.join(file_tree_lines)
                        modified_content = f"{trimmed_tree}\n{readme}"
                        token_count = deepseek_service.count_tokens(modified_content)
                        print(f"[DeepSeek] 裁剪后令牌数量: {token_count}, 剩余文件数: {len(file_tree_lines)}")

                    # Commit the trimmed file tree.
                    file_tree = '\n'.join(file_tree_lines)
                    print(f"[DeepSeek] 文件树已裁剪至 {len(file_tree_lines)} 个文件")

                    # Still too large even after trimming: give up.
                    if token_count > 65000:
                        yield f"data: {json.dumps({'error': f'Repository is still too large after optimization. Current size: {token_count} tokens. Please provide a smaller repository or focus on specific directories.'})}\n\n"
                        return

                # Large repos require the caller to supply their own API key.
                if 50000 < token_count < 65000 and not body.api_key:
                    yield f"data: {json.dumps({'error': f'File tree and README combined exceeds token limit (50,000). Current size: {token_count} tokens. This GitHub repository is too large for my wallet, but you can continue by providing your own DeepSeek API key.'})}\n\n"
                    return

                # Prepare the prompt templates.
                first_system_prompt = SYSTEM_FIRST_PROMPT
                second_system_prompt = SYSTEM_SECOND_PROMPT
                third_system_prompt = SYSTEM_THIRD_PROMPT

                # Without a README, add the special file-tree-only instructions.
                if not has_readme:
                    first_system_prompt = first_system_prompt + "\n" + NO_README_INSTRUCTIONS

                # Append custom instructions (if any) to phases one and three.
                additional_instructions = ""
                if body.instructions:
                    additional_instructions = ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT.replace("{{instructions}}", body.instructions)
                    first_system_prompt = first_system_prompt + "\n" + additional_instructions
                    third_system_prompt = third_system_prompt + "\n" + additional_instructions

                yield f"data: {json.dumps({'status': 'explanation', 'message': 'Generating repository explanation...'})}\n\n"

                explanation = ""
                system_prompt = first_system_prompt

                # STEP 1: stream the architecture explanation.
                async for chunk in deepseek_service.call_deepseek_api_stream(
                    system_prompt=system_prompt,
                    data={
                        "username": body.username,
                        "repo": body.repo,
                        "file_tree": file_tree,
                        "readme": readme,
                    },
                    api_key=body.api_key
                ):
                    explanation += chunk
                    yield f"data: {json.dumps({'status': 'explanation_chunk', 'chunk': chunk})}\n\n"

                # STEP 2: Generate component mapping
                print("[DeepSeek] 开始第二阶段: 创建组件映射")
                yield f"data: {json.dumps({'status': 'mapping_sent', 'message': 'Repository explanation complete.'})}\n\n"
                await asyncio.sleep(0.1)

                yield f"data: {json.dumps({'status': 'mapping', 'message': 'Creating component mapping...'})}\n\n"

                mapping = ""
                async for chunk in deepseek_service.call_deepseek_api_stream(
                    system_prompt=second_system_prompt,
                    data={
                        "username": body.username,
                        "repo": body.repo,
                        "explanation": explanation,
                        "file_tree": file_tree,
                    },
                    api_key=body.api_key,
                ):
                    mapping += chunk
                    yield f"data: {json.dumps({'status': 'mapping_chunk', 'chunk': chunk})}\n\n"

                # STEP 3: Generate Mermaid diagram
                print("[DeepSeek] 开始第三阶段: 生成Mermaid图表")
                yield f"data: {json.dumps({'status': 'diagram_sent', 'message': 'Component mapping complete.'})}\n\n"
                await asyncio.sleep(0.1)

                yield f"data: {json.dumps({'status': 'diagram', 'message': 'Generating Mermaid diagram...'})}\n\n"

                diagram = ""
                async for chunk in deepseek_service.call_deepseek_api_stream(
                    system_prompt=third_system_prompt,
                    data={
                        "username": body.username,
                        "repo": body.repo,
                        "explanation": explanation,
                        "mapping": mapping,
                    },
                    api_key=body.api_key
                ):
                    diagram += chunk
                    yield f"data: {json.dumps({'status': 'diagram_chunk', 'chunk': chunk})}\n\n"

                # Clean up common Mermaid syntax problems.
                diagram = sanitize_mermaid_diagram(diagram)

                # Debug log to help diagnose rendering issues.
                print(f"Generated and sanitized Mermaid diagram:\n{diagram}\n")

                # The model flags unusable instructions with this marker.
                if "BAD_INSTRUCTIONS" in diagram:
                    yield f"data: {json.dumps({'status': 'error', 'error': 'Invalid or unclear instructions provided'})}\n\n"
                    return

                # Expand click events into full GitHub URLs.
                final_diagram = process_click_events(diagram, body.username, body.repo, default_branch)

                # Complete the generation process
                print("[DeepSeek] 第三阶段完成")
                print("[DeepSeek] 生成完成: 返回最终结果")
                yield f"data: {json.dumps({'status': 'complete', 'explanation': explanation, 'diagram': final_diagram})}\n\n"

            except RateLimitError:
                yield f"data: {json.dumps({'status': 'error', 'error': 'DeepSeek API rate limit exceeded. Please try again later.'})}\n\n"
            except Exception as e:
                error_message = str(e)
                if "API key" in error_message:
                    yield f"data: {json.dumps({'status': 'error', 'error': 'Invalid API key. Please provide a valid DeepSeek API key.'})}\n\n"
                else:
                    yield f"data: {json.dumps({'status': 'error', 'error': f'An error occurred: {error_message}'})}\n\n"

        # Return a streaming response
        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "X-Accel-Buffering": "no",
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            },
        )

    except Exception as e:
        print(f"[DeepSeek] 请求处理出错: {str(e)}")
        error_message = str(e)
        if isinstance(e, RateLimitError):
            raise HTTPException(
                status_code=429,
                detail="Service is currently experiencing high demand. Please try again in a few minutes.",
            )
        return {"error": error_message}