import os
import shutil
import logging
from typing import List, Dict, Optional, Tuple, Set, Union, Any
from dataclasses import dataclass, field
from pathlib import Path
import yaml
import json
import re
import hashlib
import asyncio
import aiofiles
import aiofiles.os as aios
from contextlib import asynccontextmanager
import sys
# Swap the stdlib sqlite3 module for pysqlite3 BEFORE chromadb is imported —
# presumably because chromadb needs a newer SQLite than the system Python
# ships with (TODO confirm: this is the standard chromadb workaround).
sys.modules['sqlite3'] = __import__('pysqlite3')
from concurrent.futures import ThreadPoolExecutor
import clang.cindex as cindex
from enum import Enum, auto
from openai import OpenAI
import glob
from tqdm import tqdm

# Third-party libraries
import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from sentence_transformers import SentenceTransformer


from langchain.prompts import PromptTemplate


class ConversionError(Exception):
    """Raised when a step of the C/C++ -> Rust conversion pipeline fails."""
    pass

class FileError(Exception):
    """Raised when a source file cannot be read or written."""
    pass

class APIError(Exception):
    """Raised when a call to the LLM backend (DeepSeek/OpenAI) fails."""
    pass

@dataclass
class Config:
    """Runtime configuration for the conversion pipeline.

    Either construct directly or load from a YAML mapping via
    :meth:`from_yaml`.
    """
    api_key: str  # DeepSeek API key (required, no default)
    base_url: str = "https://api.deepseek.com/beta"  # Default DeepSeek URL
    model: str = "deepseek-coder"  # Default DeepSeek model
    temperature: float = 0.0  # Sampling temperature for the LLM
    max_tokens: int = 8000  # Per-request completion token budget
    log_level: str = "INFO"  # Logging level name (e.g. "DEBUG", "INFO")
    log_file: str = "conversion.log"  # Path of the log file
    rustc_path: str = "rustc"  # Path to rustc; adjust if not on PATH

    @classmethod
    def from_yaml(cls, yaml_file: str) -> 'Config':
        """Build a Config from a YAML file.

        An empty file yields the defaults (``api_key`` is still required and
        will raise ``TypeError`` if absent).  Raises ``ConversionError`` when
        the YAML top level is not a mapping.
        """
        with open(yaml_file, 'r', encoding='utf-8') as f:
            # safe_load returns None for an empty file; treat that as {} so we
            # don't blow up with an opaque ``cls(**None)`` TypeError.
            config_dict = yaml.safe_load(f) or {}
        if not isinstance(config_dict, dict):
            raise ConversionError(f"Invalid config file (expected a mapping): {yaml_file}")
        return cls(**config_dict)

def setup_logging(config: Config):
    """Configure root logging to both the configured file and the console.

    Falls back to INFO when ``config.log_level`` is not a valid level name.
    """
    logging.basicConfig(
        level=getattr(logging, config.log_level.upper(), logging.INFO),
        format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        handlers=[
            # Explicit UTF-8: log messages in this project contain non-ASCII
            # (Chinese) text, which would be garbled under a non-UTF-8 locale.
            logging.FileHandler(config.log_file, encoding='utf-8'),
            logging.StreamHandler()
        ]
    )

def escape_braces(text: str) -> str:
    """Double every brace so *text* survives ``str.format`` templating."""
    pieces = []
    for ch in text:
        pieces.append(ch * 2 if ch in "{}" else ch)
    return "".join(pieces)


class AsyncFileHandler:
    """Asynchronous file I/O helper.

    Reads C/C++ sources with an in-memory cache, and writes generated Rust
    files after normalising their names and stripping LLM output artifacts.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        # Maps path string -> file content; all access guarded by _cache_lock.
        self._file_cache = {}
        self._cache_lock = asyncio.Lock()

    @asynccontextmanager
    async def _open_file(self, path: Path, mode: str, encoding: Optional[str] = None, **kwargs):
        """Open *path* via aiofiles, guaranteeing the handle is closed."""
        file = None
        try:
            file = await aiofiles.open(path, mode, encoding=encoding, **kwargs)
            yield file
        finally:
            if file:
                await file.close()

    async def read_cpp_file(self, file_path: Union[str, Path], encoding: str = 'utf-8', cache: bool = True) -> str:
        """Read a C/C++ source file, optionally serving/filling the cache.

        Falls back to latin1 when decoding with *encoding* fails.  Raises
        FileError when the file is missing or unreadable.
        """
        path = Path(file_path)
        try:
            if cache:
                async with self._cache_lock:
                    cached = self._file_cache.get(str(path))
                    # `is not None`: an empty (zero-byte) file is a valid hit;
                    # truthiness testing would re-read it on every call.
                    if cached is not None:
                        self.logger.debug(f"Using cached content for {path}")
                        return cached

            if not await aios.path.exists(path):
                raise FileError(f"File not found: {path}")

            stats = await aios.stat(path)
            if stats.st_size > 10 * 1024 * 1024:  # 10 MiB
                self.logger.warning(f"Large file detected: {path}")

            async with self._open_file(path, 'r', encoding=encoding) as file:
                content = await file.read()

                if cache:
                    async with self._cache_lock:
                        self._file_cache[str(path)] = content

                return content

        except UnicodeError as e:
            self.logger.error(f"Encoding error reading {path}: {str(e)}")
            try:
                # Best-effort fallback: latin1 never fails to decode bytes.
                async with self._open_file(path, 'r', encoding='latin1') as file:
                    content = await file.read()
                self.logger.info(f"Successfully read {path} using latin1 encoding")
                return content
            except Exception as e2:
                raise FileError(f"Failed to read file with alternative encoding: {str(e2)}")

        except Exception as e:
            self.logger.error(f"Error reading file {path}: {str(e)}")
            raise FileError(f"Failed to read C++ file: {str(e)}")

    async def write_rust_file(self, original_path: Union[str, Path], content: str,
                               output_dir: Path, is_test: bool = False) -> None:
        """Write cleaned Rust *content* derived from *original_path* into *output_dir*.

        Test files are guaranteed a ``test_`` prefix; non-test files have any
        ``test_``/``test-`` prefix stripped.  The extension becomes ``.rs``
        and dashes in the stem become underscores.
        """
        try:
            original_path = Path(original_path)
            filename = original_path.name

            if is_test:
                if not filename.startswith(("test_", "test-")):
                    # BUGFIX: previously assigned the literal "test_(unknown)",
                    # which discarded the original file name entirely.
                    filename = f"test_{filename}"
            elif filename.startswith(("test_", "test-")):
                # Both prefixes are 5 characters long.
                filename = filename[5:]

            # Normalise the stem to a valid Rust module name.
            stem = Path(filename).stem.replace("-", "_")
            rust_filename = f"{stem}.rs"
            output_path = output_dir / rust_filename

            output_path.parent.mkdir(parents=True, exist_ok=True)

            content = self._clean_content(content)

            async with self._open_file(output_path, 'w', encoding='utf-8') as file:
                await file.write(content)

            self.logger.info(f"Successfully wrote Rust code to {output_path}")
            self.logger.debug(f"Written Rust file: {output_path}, Content length: {len(content)}")

        except Exception as e:
            self.logger.error(f"Error writing Rust file to {output_dir}: {str(e)}")
            raise

    def _clean_content(self, content: str) -> str:
        """Strip markdown fences and boilerplate comments from LLM output."""
        # Leading ``` fence (possibly with a language tag) and trailing fence.
        content = re.sub(r'^```\w*\n', '', content)
        content = re.sub(r'\n```$', '', content)

        # Leading "// something.rs" filename comment.
        content = re.sub(r'^// .*\.rs\n', '', content)

        # Stray "/// Documentation" placeholder lines anywhere in the file.
        content = re.sub(r'^/// Documentation\n', '', content, flags=re.MULTILINE)

        # Normalise to exactly one trailing newline.
        content = content.strip() + '\n'

        return content

    async def clear_cache(self) -> None:
        """Drop all cached file contents."""
        async with self._cache_lock:
            self._file_cache.clear()

    async def remove_from_cache(self, file_path: Union[str, Path]) -> None:
        """Evict a single file (if present) from the cache."""
        path_str = str(Path(file_path))
        async with self._cache_lock:
            self._file_cache.pop(path_str, None)

@dataclass
class RAGDocument:
    """A single knowledge-base document destined for the RAG vector store."""
    content: str    # raw document text (what gets embedded)
    metadata: Dict  # e.g. category / subcategory / relevance
    doc_type: str   # source directory: "syntax" | "patterns" | "api" | "examples"

class KnowledgeBase:
    """On-disk knowledge base: JSON documents grouped by category directory."""

    # Directories whose JSON files count as loadable knowledge documents.
    _DOC_DIRS = ("syntax", "patterns", "api", "examples")

    def __init__(self, base_path: str):
        self.base_path = Path(base_path)
        self.logger = logging.getLogger(self.__class__.__name__)
        self._ensure_knowledge_base()

    def update_specific_document(self, doc_type: str, file_name: str, new_content: Dict[str, Any]):
        """Incrementally update (or create) ``<base>/<doc_type>/<file_name>.json``.

        ``new_content`` follows the basic-docs shape: an optional ``content``
        string (appended with a newline separator) and an optional
        ``metadata`` dict (merged over the existing metadata).
        """
        try:
            # Every doc_type (including "error_resolution") maps to the same
            # <base>/<doc_type>/ layout — the previous special-case branch was
            # identical to the general one.
            doc_path = self.base_path / doc_type / f"{file_name}.json"
            self.logger.info(f"Updating knowledge document: {doc_path}")

            doc_path.parent.mkdir(parents=True, exist_ok=True)

            if doc_path.exists():
                with open(doc_path, 'r', encoding='utf-8') as f:
                    existing_data = json.load(f)
                self.logger.debug(f"Existing content: {existing_data.get('content')}")
            else:
                existing_data = {
                    "content": "",
                    "metadata": {
                        "category": doc_type,
                        "subcategory": file_name,
                        "relevance": 1.0
                    }
                }
                self.logger.info(f"Creating new knowledge document: {doc_path}")

            # Append new content (with a newline separator when non-empty).
            if 'content' in new_content:
                if existing_data.get('content'):
                    existing_data['content'] += "\n" + new_content['content']
                else:
                    existing_data['content'] = new_content['content']
                self.logger.debug(f"Updated content: {existing_data['content']}")

            # Merge metadata, tolerating documents that lack the key.
            if 'metadata' in new_content:
                existing_data.setdefault('metadata', {}).update(new_content['metadata'])
                self.logger.debug(f"Updated metadata: {existing_data['metadata']}")

            with open(doc_path, 'w', encoding='utf-8') as f:
                json.dump(existing_data, f, ensure_ascii=False, indent=2)

            self.logger.info(f"Updated knowledge document: {doc_path}")

        except Exception as e:
            self.logger.error(f"更新知识文档失败: {str(e)}")
            raise

    def has_valid_documents(self) -> bool:
        """Return True if any category directory holds a non-empty JSON file."""
        try:
            for dir_name in self._DOC_DIRS:
                dir_path = self.base_path / dir_name
                if dir_path.exists():
                    for file_path in dir_path.glob("*.json"):
                        if file_path.stat().st_size > 0:
                            return True
            return False
        except Exception as e:
            self.logger.error(f"检查知识库文档失败: {str(e)}")
            return False

    def _ensure_knowledge_base(self):
        """Create the directory layout and seed the basic documents."""
        try:
            for dir_name in self._DOC_DIRS + ("error_resolution",):
                (self.base_path / dir_name).mkdir(parents=True, exist_ok=True)
            self._ensure_basic_documents()
        except Exception as e:
            self.logger.error(f"初始化知识库失败: {str(e)}")
            raise

    def _ensure_basic_documents(self):
        """Write the seed documents, skipping any that already exist."""
        basic_docs = {
            "syntax/basic.json": {
                "content": "基础语法转换对照:\n1. C指针 -> Rust引用\n2. C结构体 -> Rust结构体和impl\n3. C宏定义 -> Rust宏",
                "metadata": {
                    "category": "syntax",
                    "subcategory": "basic",
                    "relevance": 1.0
                }
            },
            "patterns/basic.json": {
                "content": "基础设计模式转换:\n1. RAII模式\n2. 工厂模式\n3. 观察者模式",
                "metadata": {
                    "category": "patterns",
                    "subcategory": "basic",
                    "relevance": 1.0
                }
            },
            "api/basic.json": {
                "content": "基础API对照:\n1. malloc -> std::alloc::alloc\n2. free -> std::alloc::dealloc\n3. memcpy -> std::ptr::copy_nonoverlapping",
                "metadata": {
                    "category": "api",
                    "subcategory": "basic",
                    "relevance": 1.0
                }
            }
        }

        for file_path, content in basic_docs.items():
            full_path = self.base_path / file_path
            if not full_path.exists():
                full_path.parent.mkdir(parents=True, exist_ok=True)
                with open(full_path, 'w', encoding='utf-8') as f:
                    json.dump(content, f, ensure_ascii=False, indent=2)

    def load_documents(self) -> List["RAGDocument"]:
        """Load every non-empty, valid JSON document as a RAGDocument."""
        if not self.has_valid_documents():
            self.logger.info("未找到有效的知识文档，跳过加载")
            return []

        documents = []
        try:
            for dir_name in self._DOC_DIRS:
                dir_path = self.base_path / dir_name
                if not dir_path.exists():
                    continue
                for file_path in dir_path.glob("*.json"):
                    try:
                        if file_path.stat().st_size > 0:
                            with open(file_path, encoding='utf-8') as f:
                                data = json.load(f)
                            documents.append(RAGDocument(
                                content=data["content"],
                                metadata=data.get("metadata", {}),
                                doc_type=dir_name
                            ))
                    except json.JSONDecodeError:
                        self.logger.warning(f"跳过无效的JSON文件: {file_path}")
                    except Exception as e:
                        self.logger.warning(f"加载文件 {file_path} 失败: {str(e)}")
                        continue
            return documents
        except Exception as e:
            self.logger.error(f"加载知识库文档失败: {str(e)}")
            return []

class LocalEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function backed by a local SentenceTransformer model."""

    def __init__(self):
        # NOTE(review): hard-coded model location — consider making this
        # configurable via Config.
        model_path = "/usr/src/interoptool/temp_model"
        self.model = SentenceTransformer(model_path)
        self.logger = logging.getLogger(self.__class__.__name__)

    def __call__(self, texts: Documents) -> Embeddings:
        """Embed *texts*; returns [] for empty input."""
        if not texts:
            return []
        try:
            # Create a fresh, short-lived progress bar per call.  The previous
            # design shared one tqdm instance and entered it as a context
            # manager, which closes the bar on first exit and left every
            # subsequent call operating on a closed bar.
            with tqdm(position=0, leave=True, dynamic_ncols=True, desc="Processing") as pbar:
                embeddings = self.model.encode(
                    texts,
                    convert_to_tensor=False,
                    show_progress_bar=False  # suppress the model's internal bar
                )
                pbar.update(1)
            return embeddings.tolist()
        except Exception as e:
            self.logger.error(f"生成嵌入向量失败: {str(e)}")
            raise

@dataclass
class QueryResult:
    """One retrieved document together with its similarity score."""
    content: str
    metadata: Dict[str, Any]
    score: float

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'QueryResult':
        """Build a QueryResult from a plain dict, defaulting missing fields."""
        meta = data.get('metadata', {})
        similarity = data.get('score', 0.0)
        return cls(content=data['content'], metadata=meta, score=similarity)

class RAGStore:
    """Vector-store wrapper around a persistent Chroma collection."""

    def __init__(self, persist_dir: str, config: "Config", knowledge_base: "KnowledgeBase"):
        self.logger = logging.getLogger(self.__class__.__name__)
        self._executor = ThreadPoolExecutor(max_workers=3)

        persist_path = Path(persist_dir)
        persist_path.mkdir(parents=True, exist_ok=True)

        self.client = chromadb.PersistentClient(path=str(persist_path))
        self.embedding_func = LocalEmbeddingFunction()

        collection_name = "cpp2rust"
        existing_collections = self.client.list_collections()

        if any(col.name == collection_name for col in existing_collections):
            self.collection = self.client.get_collection(
                name=collection_name,
                embedding_function=self.embedding_func
            )
            self.logger.info(f"获取到已存在的collection: {collection_name}")
        else:
            self.collection = self.client.create_collection(
                name=collection_name,
                embedding_function=self.embedding_func
            )
            self.logger.info(f"创建了新的collection: {collection_name}")

        self.knowledge_base = knowledge_base  # back-reference to the KnowledgeBase

    def add_documents(self, documents: List["RAGDocument"]):
        """Add documents not already present, deduplicated by content hash."""
        if not documents:
            self.logger.info("没有新文档需要添加")
            return

        try:
            # Set lookup: membership tests against the raw list were O(n)
            # per document, O(n^2) overall.
            existing_ids = set(self.collection.get()["ids"])
            new_docs = []
            new_ids = []
            new_metadatas = []

            for doc in documents:
                doc_id = self._get_doc_id(doc)
                if doc_id not in existing_ids:
                    new_docs.append(doc.content)
                    new_ids.append(doc_id)
                    new_metadatas.append(doc.metadata)

            if new_docs:
                self.collection.add(
                    documents=new_docs,
                    ids=new_ids,
                    metadatas=new_metadatas
                )
                self.logger.info(f"成功添加 {len(new_docs)} 个新文档")
            else:
                self.logger.info("所有文档都已存在，无需添加")

        except Exception as e:
            self.logger.error(f"添加文档失败: {str(e)}")
            raise

    async def query(self, query_text: str, n_results: int = 3, filter_dict: Optional[Dict] = None) -> List["QueryResult"]:
        """Run a similarity query on a worker thread; returns [] on failure."""
        try:
            # get_running_loop: we are inside a coroutine, and get_event_loop
            # is deprecated here since Python 3.10.
            loop = asyncio.get_running_loop()
            results = await loop.run_in_executor(
                self._executor,
                lambda: self.collection.query(
                    query_texts=[query_text],
                    n_results=n_results,
                    where=filter_dict
                )
            )

            return await self._format_results(results)
        except Exception as e:
            self.logger.error(f"查询失败: {str(e)}")
            return []

    def _get_doc_id(self, doc: "RAGDocument") -> str:
        """Deterministic document id: ``<doc_type>_<md5(content)>``."""
        content_hash = hashlib.md5(doc.content.encode()).hexdigest()
        return f"{doc.doc_type}_{content_hash}"

    async def _format_results(self, results: Dict) -> List["QueryResult"]:
        """Convert a raw Chroma result dict into scored QueryResult objects."""
        try:
            formatted = []
            for doc, metadata, distance in zip(
                results['documents'][0],
                results['metadatas'][0],
                results['distances'][0]
            ):
                # Chroma returns distances; expose a similarity-style score.
                formatted.append(QueryResult(
                    content=doc,
                    metadata=metadata,
                    score=1 - distance
                ))
            return formatted
        except Exception as e:
            self.logger.error(f"格式化结果失败: {str(e)}")
            return []


class RAGEnhancer:
    """Enriches conversion prompts with knowledge retrieved from the RAG store."""

    def __init__(self, rag_store: RAGStore):
        self.rag_store = rag_store
        self.logger = logging.getLogger(self.__class__.__name__)

    async def enhance_prompt_with_existing(self, base_prompt: str, cpp_code: str, existing_classes_code: str) -> str:
        """Return *base_prompt* augmented with retrieved knowledge and any
        existing Rust code; falls back to the unmodified prompt on failure."""
        try:
            features = self._extract_features(cpp_code)
            if not features:
                return base_prompt

            # One concurrent store query per extracted feature.
            outcomes = await asyncio.gather(
                *(
                    self.rag_store.query(
                        query_text=feature,
                        n_results=2,
                        filter_dict={"relevance": {"$gt": 0.7}}
                    )
                    for feature in features
                ),
                return_exceptions=True
            )

            relevant_docs: List[QueryResult] = []
            for outcome in outcomes:
                if isinstance(outcome, Exception):
                    self.logger.warning(f"查询失败: {str(outcome)}")
                else:
                    relevant_docs.extend(outcome)

            relevant_docs.sort(key=lambda d: d.score, reverse=True)

            return self._integrate_docs_with_existing(
                base_prompt,
                relevant_docs,
                existing_classes_code
            )

        except Exception as e:
            self.logger.error(f"Prompt增强失败: {str(e)}")
            return base_prompt

    def _extract_features(self, cpp_code: Optional[str]) -> List[str]:
        """Collect deduplicated query keywords describing the given C code."""
        try:
            if not cpp_code or not isinstance(cpp_code, str):
                return []

            found = set()

            # Function names: "<primitive return type> name("
            for m in re.finditer(
                r'\b(?:void|int|float|double|char|long|short|unsigned|signed)\s+(\w+)\s*\(',
                cpp_code
            ):
                found.add(m.group(1))

            # Struct names.
            for m in re.finditer(r'\bstruct\s+(\w+)', cpp_code):
                found.add(m.group(1))

            lowered = cpp_code.lower()

            # Type-system related features.
            if 'unsigned' in cpp_code:
                found.add("unsigned_type_conversion")
            if 'size_t' in cpp_code:
                found.add("size_t_conversion")
            if 'struct' in cpp_code:
                found.add("struct_definition")

            # Data-structure hints.
            if any(tok in lowered for tok in ('heap', 'priority_queue', 'linked_list', 'array')):
                found.add("data_structure_implementation")
            if 'compare' in lowered:
                found.add("comparison_functions")

            # Test-related hints.
            if 'test' in lowered:
                found.add("test_patterns")
                if 'assert' in cpp_code:
                    found.add("test_assertions")

            # Operation / pattern heuristics (substring-level, intentionally loose).
            if re.search(r'insert|delete|find|push|pop', cpp_code):
                found.add("data_structure_operations")
            if re.search(r'struct\s+\w+', cpp_code):
                found.add("struct_type_usage")
            if re.search(r'while|for|do', cpp_code):
                found.add("iteration_patterns")
            if re.search(r'malloc|free|calloc|realloc', cpp_code):
                found.add("memory_management")

            return list(found)
        except Exception as e:
            self.logger.error(f"提取特征失败: {str(e)}")
            return []

    def _integrate_docs_with_existing(self, prompt: str, docs: List[QueryResult], existing_classes_code: str) -> str:
        """Prepend weighted reference docs and existing Rust code to *prompt*."""
        try:
            if not docs and not existing_classes_code:
                return prompt

            def weight_of(doc: QueryResult) -> float:
                w = doc.score
                if 'test' in doc.metadata.get('subcategory', ''):
                    w *= 1.5  # boost test-related knowledge
                if doc.metadata.get('subcategory') in ['heap', 'heap_tests', 'linked_list', 'array']:
                    w *= 1.3  # boost known data-structure docs
                return w

            ranked = sorted(
                ((doc, weight_of(doc)) for doc in docs),
                key=lambda pair: pair[1],
                reverse=True
            )

            context_parts = []
            # Keep only the three highest-weighted docs above the floor.
            for doc, weight in ranked[:3]:
                if weight < 0.3:
                    continue
                context_parts.extend([f"Reference (score: {weight:.2f}):", doc.content, ""])

            if existing_classes_code:
                context_parts.extend(["Existing Rust code:", existing_classes_code, ""])

            if not context_parts:
                return prompt

            joined = chr(10).join(context_parts)
            return f"""
Related knowledge:
{joined}

Original prompt:
{prompt}
"""

        except Exception as e:
            self.logger.error(f"整合文档失败: {str(e)}")
            return prompt


@dataclass
class ThoughtStep:
    """One step of the model's conversion reasoning chain."""
    step_name: str         # e.g. "analyze_structure", "plan_conversion"
    reasoning: str         # the model's detailed thought process
    conclusion: str        # summary conclusion for this step
    next_steps: List[str]  # planned follow-up steps

@dataclass
class ConversionThoughtChain:
    """Full reasoning trace: the ordered thought steps plus the final Rust code."""
    steps: List[ThoughtStep]
    final_code: str

    def to_json(self) -> str:
        """Serialize the chain (steps as plain dicts) to pretty-printed JSON."""
        payload = {
            'steps': [vars(s) for s in self.steps],
            'final_code': self.final_code,
        }
        return json.dumps(payload, indent=2)

class ThoughtChainPromptManager:
    """Builds and formats the step-by-step (thought-chain) prompts that drive
    the C -> Rust conversion dialogue with the model.

    Prompt templates are keyed by step name ("system", "analyze_structure",
    "plan_conversion", "implement_conversion", "verify_result") and filled in
    via ``str.format``, which is why user-supplied code must have its braces
    escaped before formatting.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.prompts = self._initialize_prompts()

    def _initialize_prompts(self) -> Dict[str, str]:
        """Return the static prompt templates, keyed by conversion step.

        Literal braces intended for the model's JSON output are doubled
        (``{{``/``}}``) so they survive ``str.format``.
        """
        return {
            "system": """You are a C to Rust code conversion assistant specialized in data structures and algorithms.

Key responsibilities:
1. Analyze C code structure and patterns
2. Plan safe and idiomatic Rust conversions
3. Handle memory management and type safety
4. Generate proper test cases with type annotations

For data structures:
- Ensure proper generic type constraints (e.g., T: Ord for heaps)
- Convert C pointers to safe Rust references/ownership
- Maintain data structure invariants

Return all responses in this JSON format:
{{
  "reasoning": "<your detailed thought process>",
  "conclusion": "<your final conclusion>",
  "next_steps": ["<step 1>", "<step 2>", "..."]
}}""",

            "analyze_structure": """
Analyze the structure and functionality of the following C code:

{cpp_code}

Focus on these aspects:
1. Data structure type and properties:
   - Core data type (array, heap, struct, etc.)
   - Required type constraints (ordering, equality, etc.)
   - Memory management patterns
   - Function pointers and callbacks

2. Operations and functions:
   - Core algorithms
   - Pointer usage patterns
   - Type conversions
   - Trait requirements for types

3. Type system details:
   - Struct definitions
   - Type constraints
   - Raw pointer usage
   - Function type abstractions

4. Test requirements:
   - Type annotations
   - Test initialization patterns
   - Memory safety requirements
   - Debug needs

Special attention to:
- Function pointers and callbacks that may become trait objects
- Types that need specific trait implementations
- Debug requirements for types

Return your analysis in JSON format with:
- reasoning field explaining your analysis process
- conclusion field summarizing your findings
- next_steps array listing conversion steps""",

            "plan_conversion": """
Based on the previous analysis:
{analysis_result}

Plan the Rust conversion strategy for:
{cpp_code}

Include specific plans for:
1. Type System:
   - Generic type parameters and bounds
   - Concrete types in tests
   - Pointer to reference conversions

2. Memory Safety:
   - Ownership model
   - Reference handling
   - Raw pointer conversions

3. Data Structure Invariants:
   - Type constraints (e.g., Ord, Eq)
   - Structure properties
   - Safety guarantees

4. Testing Strategy:
   - Type annotations
   - Test initialization
   - Error cases

Ensure that all generic types have concrete type parameters based on the C code's usage.

Return your plan in JSON format with reasoning, conclusion, and next_steps fields.""",

            "implement_conversion": """
Based on the conversion plan:
{plan_result}

Convert this C code to Rust:

{cpp_code}

IMPORTANT REQUIREMENTS:
1. Documentation:
   /// Add documentation comments for ALL public items

2. Type Parameters:
   - Use specific type parameters based on the C code (e.g., ArrayList<i32>)
   - Always specify concrete types in tests (e.g., BinaryHeap<i32>)
   - Include necessary trait bounds (e.g., T: Ord for heap)
   - Use appropriate integer types (i32/i64 for signed values)

3. Visibility:
   - Add pub for structs and necessary fields
   - Use proper visibility modifiers

4. Code Structure:
   - Only derive necessary traits
   - For data structures, consider trait bounds carefully:
     * Debug is not always needed, especially for internal types
     * Clone/Copy should be derived only when needed
     * Custom traits don't automatically implement Debug
   - Add proper error handling
   - Follow Rust naming conventions
   - For test functions, add #[test] attribute

5. Testing:
   - Mark test functions with #[test]
   - Use assert! or assert_eq! for verifications
   - Follow Rust test naming conventions
   - Always use explicit type annotations in test code
   - Ensure proper test initialization and cleanup
   - Use appropriate integer types in test cases

6. Format:
   - Proper indentation
   - Consistent spacing
   - Clean and readable code

Ensure that all generic types have their type parameters specified explicitly based on the C code's usage.

Return ONLY the final Rust code.""",

            "verify_result": """
Verify the correctness of this Rust code:
{rust_code}

Original C code:
{cpp_code}

Compilation errors:
{rustc_errors}

Verification checklist:
1. Type System:
   - All generic parameters properly constrained
   - Correct trait bounds (e.g., T: Ord for heap)
   - Proper type annotations in tests
   - Safe pointer/reference conversions

2. Memory Safety:
   - Proper ownership model
   - Safe reference handling
   - Justified unsafe blocks

3. Data Structure Correctness:
   - Structure invariants maintained
   - Operations properly implemented
   - Error handling complete

4. Test Coverage:
   - All test cases properly annotated
   - Edge cases covered
   - Memory safety verified

Report any issues found and suggest improvements in the following JSON format:

{{
  "content": "<detailed error analysis and solutions>",
  "metadata": {{
    "category": "error_resolution",
    "subcategory": "<specific file/module name>",
    "relevance": 1.0
  }}
}}

Only return the above JSON object without any additional text or explanations.
"""
    }

    def get_prompt(self, step: str, **kwargs) -> str:
        """Format the template for *step* with **kwargs.

        Raises KeyError for an unknown step and ValueError when a required
        field is missing or None.  Code/compiler-output fields have their
        braces escaped so ``str.format`` does not misinterpret them.
        """
        try:
            if step not in self.prompts:
                raise KeyError(f"未知的步骤: {step}")

            prompt_template = self.prompts[step]
            # Make sure every field required by this step was supplied.
            required_fields = {
                "verify_result": ["rust_code", "cpp_code", "rustc_errors"],
                # Required fields for other steps can be added here...
            }

            missing_fields = []
            for field in required_fields.get(step, []):
                if field not in kwargs or kwargs[field] is None:
                    missing_fields.append(field)

            if missing_fields:
                raise ValueError(f"缺少必要的字段: {', '.join(missing_fields)}")

            # Escape braces in embedded source code / compiler output.
            for key in ["rust_code", "cpp_code", "rustc_errors"]:
                if key in kwargs and isinstance(kwargs[key], str):
                    kwargs[key] = escape_braces(kwargs[key])

            formatted_prompt = prompt_template.format(**kwargs)
            self.logger.debug(f"生成的提示（步骤 '{step}'）: {formatted_prompt}")
            return formatted_prompt
        except Exception as e:
            self.logger.error(f"获取提示时出错: {str(e)}")
            raise


class AsyncTimeout:
    """Async context-manager factory that cancels the enclosed block after
    ``timeout`` seconds, surfacing the cancellation as ``asyncio.TimeoutError``.

    Usage::

        async with AsyncTimeout(5.0)():
            await do_work()
    """

    def __init__(self, timeout: float):
        self.timeout = timeout

    @asynccontextmanager
    async def __call__(self):
        # BUGFIX: the previous implementation only created (and then
        # cancelled) a detached sleep task, so the timeout never fired.
        # Schedule a cancellation of the *current* task instead.
        task = asyncio.current_task()
        loop = asyncio.get_running_loop()
        handle = loop.call_later(self.timeout, task.cancel)
        try:
            yield
        except asyncio.CancelledError:
            # NOTE(review): an external cancellation arriving inside the block
            # is also reported as a timeout, matching the original intent.
            raise asyncio.TimeoutError()
        finally:
            handle.cancel()

class AIModel:
    def __init__(self, config: Config):
        self.config = config
        self.logger = logging.getLogger(self.__class__.__name__)
        self.client = OpenAI(
            api_key=config.api_key,
            base_url=config.base_url
        )

    async def generate_with_history(self, messages: List[Dict[str, str]]) -> str:
        """使用对话历史生成响应"""
        try:
            loop = asyncio.get_event_loop()
            response = await loop.run_in_executor(
                None,
                lambda: self.client.chat.completions.create(
                    model=self.config.model,
                    messages=messages,
                    temperature=self.config.temperature,
                    max_tokens=self.config.max_tokens
                )
            )

            content = response.choices[0].message.content
            return content
        except Exception as e:
            self.logger.error(f"DeepSeek API调用错误: {str(e)}")
            raise APIError(f"DeepSeek API调用失败: {str(e)}")

    async def generate(self, prompt: str, system_prompt: str = None) -> str:
        try:
            messages = []
            if system_prompt:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": prompt})

            loop = asyncio.get_event_loop()
            response = await loop.run_in_executor(
                None,
                lambda: self.client.chat.completions.create(
                    model=self.config.model,
                    messages=messages,
                    temperature=self.config.temperature,
                    max_tokens=self.config.max_tokens
                )
            )

            content = response.choices[0].message.content
            return content
        except Exception as e:
            self.logger.error(f"DeepSeek API调用错误: {str(e)}")
            raise APIError(f"DeepSeek API调用失败: {str(e)}")

class RustCodeValidator:
    """Lightweight helpers for sanity-checking generated Rust source."""

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)

    def _remove_comments_and_strings(self, code: str) -> str:
        """Strip comments and blank out string literals in `code`.

        Line (//) and block (/* */) comments are removed; every double-quoted
        string literal is replaced by "" so later structural checks are not
        confused by braces or slashes inside strings.

        The previous implementation stripped comments *before* strings, so a
        `//` inside a string literal (e.g. "http://x") truncated the rest of
        the line. A single alternation pass that matches strings first fixes
        that: comment markers inside strings are left untouched.

        NOTE(review): Rust raw strings (r"...") and char literals ('"') are
        not modeled — confirm whether inputs can contain them.
        """
        def _replace(match: "re.Match[str]") -> str:
            # Group 1 = string literal -> keep an empty placeholder;
            # otherwise the match is a comment -> drop it entirely.
            return '""' if match.group(1) is not None else ''

        # Strings first so they win over comment markers at the same position;
        # DOTALL lets block comments span newlines.
        pattern = r'("(?:[^"\\]|\\.)*")|//[^\n]*|/\*.*?\*/'
        return re.sub(pattern, _replace, code, flags=re.DOTALL)

class ThoughtChainProcessor:
    """Drives the three-stage (analyze -> plan -> implement) conversion
    chain against the AI model, optionally enriching every stage prompt
    with RAG context."""

    def __init__(self, ai_model: 'AIModel', rag_enhancer: Optional[RAGEnhancer] = None):
        self.ai_model = ai_model
        self.rag_enhancer = rag_enhancer
        self.prompt_manager = ThoughtChainPromptManager()
        self.logger = logging.getLogger(self.__class__.__name__)
        # Per-call context is passed explicitly to process(); no global
        # conversion state is kept on the instance.

    async def _maybe_enhance(self, prompt: str, cpp_code: str, existing_rust: str) -> str:
        """Run `prompt` through the RAG enhancer when one is configured;
        otherwise return it unchanged."""
        if self.rag_enhancer is None:
            return prompt
        return await self.rag_enhancer.enhance_prompt_with_existing(
            prompt, cpp_code, existing_rust
        )

    async def process(self, cpp_code: str, existing_classes: Optional[Dict[str, str]] = None, is_test: bool = False) -> ConversionThoughtChain:
        """Run the full analyze/plan/implement chain over one source file.

        Args:
            cpp_code: the C/C++ source to convert.
            existing_classes: name -> already-converted Rust code, used as
                RAG context. Defaults to empty (the old `={}` mutable
                default is replaced with None to avoid shared state).
            is_test: accepted for interface compatibility; the prompts are
                currently identical for test files — TODO confirm intent.

        Returns:
            ConversionThoughtChain with all three steps and the final code.
        """
        existing_classes = existing_classes or {}
        existing_rust = "\n\n".join(existing_classes.values())
        thought_steps = []
        try:
            # Each file gets its own isolated conversation history.
            messages = [{
                "role": "system",
                "content": self.prompt_manager.prompts["system"]
            }]

            # Stage 1: structural analysis (RAG-enhanced when available).
            analysis_prompt = await self._maybe_enhance(
                self.prompt_manager.get_prompt("analyze_structure", cpp_code=cpp_code),
                cpp_code,
                existing_rust
            )
            messages.append({"role": "user", "content": analysis_prompt})
            analysis_result = await self._process_step_with_history(messages)
            thought_steps.append(analysis_result)
            messages.append({"role": "assistant", "content": analysis_result.conclusion})

            # Stage 2: conversion planning, fed with the analysis conclusion.
            plan_prompt = await self._maybe_enhance(
                self.prompt_manager.get_prompt(
                    "plan_conversion",
                    analysis_result=analysis_result.conclusion,
                    cpp_code=cpp_code
                ),
                cpp_code,
                existing_rust
            )
            messages.append({"role": "user", "content": plan_prompt})
            plan_result = await self._process_step_with_history(messages)
            thought_steps.append(plan_result)
            messages.append({"role": "assistant", "content": plan_result.conclusion})

            # Stage 3: implementation, fed with the plan conclusion.
            implement_prompt = await self._maybe_enhance(
                self.prompt_manager.get_prompt(
                    "implement_conversion",
                    plan_result=plan_result.conclusion,
                    cpp_code=cpp_code
                ),
                cpp_code,
                existing_rust
            )
            messages.append({"role": "user", "content": implement_prompt})
            implement_result = await self._process_step_with_history(messages)
            thought_steps.append(implement_result)

            return ConversionThoughtChain(
                steps=thought_steps,
                final_code=implement_result.conclusion
            )

        except Exception as e:
            self.logger.error(f"思维链处理错误: {str(e)}")
            raise

    async def _process_step_with_history(self, messages: List[Dict[str, str]]) -> ThoughtStep:
        """Send the accumulated conversation to the model and parse the
        reply into a ThoughtStep, tolerating non-JSON responses."""
        try:
            response = await self.ai_model.generate_with_history(messages)

            # Extract the first {...} span; models sometimes wrap the JSON
            # in prose or prefix it with a bare "json" tag.
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                json_str = json_match.group(0)
            else:
                cleaned_response = response.strip()
                if cleaned_response.startswith("json"):
                    cleaned_response = cleaned_response[4:].strip()
                json_match = re.search(r'\{.*\}', cleaned_response, re.DOTALL)
                if json_match:
                    json_str = json_match.group(0)
                else:
                    # No JSON found; json.loads('') fails below and the raw
                    # response becomes the conclusion.
                    json_str = ''

            try:
                result = json.loads(json_str)
            except json.JSONDecodeError:
                result = {
                    "reasoning": "N/A",
                    "conclusion": response.strip(),
                    "next_steps": []
                }

            # `key` avoids shadowing dataclasses.field imported at module level.
            for key in ["reasoning", "conclusion", "next_steps"]:
                if key not in result:
                    result[key] = ""

            if not isinstance(result["next_steps"], list):
                result["next_steps"] = [result["next_steps"]]

            return ThoughtStep(
                step_name=messages[-1].get("content", "").split("\n")[0],
                reasoning=result["reasoning"],
                conclusion=result["conclusion"],
                next_steps=result["next_steps"]
            )

        except Exception as e:
            self.logger.error(f"处理步骤失败: {str(e)}")
            raise

    async def _process_step(self, prompt: str, step_name: str) -> ThoughtStep:
        """Single-turn variant of a chain step (no shared history).

        NOTE(review): this method still reads self.current_cpp_code and
        self.converted_classes_rust, which __init__ no longer sets; calling
        it with a RAG enhancer configured raises AttributeError. It looks
        superseded by _process_step_with_history — confirm before use.
        """
        if self.rag_enhancer is not None:
            prompt = await self.rag_enhancer.enhance_prompt_with_existing(
                prompt,
                self.current_cpp_code,
                "\n\n".join(self.converted_classes_rust.values())
            )
        system_prompt = self.prompt_manager.prompts["system"]
        response = await self.ai_model.generate(prompt, system_prompt)
        try:
            result = json.loads(response)
        except json.JSONDecodeError:
            result = {
                "reasoning": "N/A",
                "conclusion": response.strip(),
                "next_steps": []
            }

        # `key` avoids shadowing dataclasses.field imported at module level.
        for key in ["reasoning", "conclusion", "next_steps"]:
            if key not in result:
                result[key] = ""

        if not isinstance(result["next_steps"], list):
            result["next_steps"] = [result["next_steps"]]

        return ThoughtStep(
            step_name=step_name,
            reasoning=result["reasoning"],
            conclusion=result["conclusion"],
            next_steps=result["next_steps"]
        )

@dataclass
class HeaderAnalysis:
    """Structured summary of one C/C++ header file."""
    types: Dict[str, str]       # type name -> definition text
    functions: Dict[str, str]   # function name -> declaration text
    macros: Dict[str, str]      # macro name -> definition text
    constants: Dict[str, str]   # constant name -> value text
    includes: List[str]         # headers #include'd by this one
    source_file: Optional[str]  # matching .c/.cpp path, if one exists

    def to_json(self) -> str:
        """Serialize every field to a pretty-printed JSON string."""
        # A plain (non-slots) dataclass stores its fields in __dict__ in
        # declaration order, so vars() yields exactly the field mapping.
        return json.dumps(vars(self), indent=2)

    @classmethod
    def from_json(cls, json_str: str) -> 'HeaderAnalysis':
        """Rebuild an instance from a string produced by to_json()."""
        return cls(**json.loads(json_str))

class SourceFileAnalyzer:
    """源文件分析器"""
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self._header_cache: Dict[str, HeaderAnalysis] = {}
        self._cache_lock = asyncio.Lock()

    async def analyze_header(self, header_path: str, ai_model: AIModel) -> HeaderAnalysis:
        """分析头文件"""
        header_path = str(Path(header_path))
        try:
            async with self._cache_lock:
                if header_path in self._header_cache:
                    self.logger.debug(f"Using cached header analysis for {header_path}")
                    return self._header_cache[header_path]
    
            content = await self._read_file(header_path)
            prompt = f"""Analyze this C/C++ header file and extract its key components:
    {content}
    
    Return your analysis in this JSON format:
    {{
        "types": {{"type_name": "definition"}},
        "functions": {{"function_name": "declaration"}},
        "macros": {{"macro_name": "definition"}},
        "constants": {{"constant_name": "value"}},
        "includes": ["included_header_files"]
    }}
    
    Important: Return ONLY the JSON object, without any markdown formatting or code block markers."""
    
            analysis_json = await ai_model.generate(prompt)
            try:
                # 清理可能的markdown标记
                cleaned_json = analysis_json.strip()
                if cleaned_json.startswith("```"):
                    cleaned_json = cleaned_json.split("\n", 1)[1]
                if cleaned_json.endswith("```"):
                    cleaned_json = cleaned_json.rsplit("\n", 1)[0]
                if cleaned_json.endswith("`"):
                    cleaned_json = cleaned_json[:-1]
                
                analysis_data = json.loads(cleaned_json)
                source_file = self._find_source_file(header_path)
                analysis_data['source_file'] = source_file
                analysis = HeaderAnalysis(**analysis_data)
    
                async with self._cache_lock:
                    self._header_cache[header_path] = analysis
    
                return analysis
            except json.JSONDecodeError:
                self.logger.error(f"Failed to parse header analysis JSON: {analysis_json}")
                raise
    
        except Exception as e:
            self.logger.error(f"Error analyzing header file {header_path}: {str(e)}")
            raise

    def _find_source_file(self, header_path: str) -> Optional[str]:
        """查找头文件对应的源文件"""
        header_path = Path(header_path)
        possible_extensions = ['.c', '.cpp', '.cxx', '.cc']
        
        for ext in possible_extensions:
            source_path = header_path.with_suffix(ext)
            if source_path.exists():
                return str(source_path)
        return None

    async def _read_file(self, file_path: str) -> str:
        """读取文件内容"""
        async with aiofiles.open(file_path, 'r') as f:
            return await f.read()

class CodeConverter:
    """Top-level orchestrator: discovers C/C++ sources, drives the thought
    chain, writes Rust output, compile-checks with rustc, and retries once
    on failure using knowledge-base feedback."""

    def __init__(self, config: Config):
        self.config = config
        self.ai_model = AIModel(config)
        self.file_handler = AsyncFileHandler()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.source_analyzer = SourceFileAnalyzer()

        # Project root: two levels above Tool/src.
        self.project_root = Path(__file__).resolve().parent.parent.parent

        # Output directory layout.
        self.output_dir = self.project_root / "Output" / "algorithms"
        self.src_output_dir = self.output_dir / "src"
        self.tests_output_dir = self.output_dir / "tests"

        # Create the output directories up front.
        self.src_output_dir.mkdir(parents=True, exist_ok=True)
        self.tests_output_dir.mkdir(parents=True, exist_ok=True)

        # Knowledge base + RAG; both stay None when no documents exist yet,
        # so every later use must be guarded.
        self.knowledge_base = KnowledgeBase("knowledge")
        documents = self.knowledge_base.load_documents()
        if documents:
            self.rag_store = RAGStore("rag_storage", self.config, self.knowledge_base)
            self.rag_store.add_documents(documents)
            self.rag_enhancer = RAGEnhancer(self.rag_store)
        else:
            self.rag_store = None
            self.rag_enhancer = None

        self.thought_chain = ThoughtChainProcessor(self.ai_model, rag_enhancer=self.rag_enhancer)
        # Rust code of already-converted items, used as conversion context.
        self.converted_structs_rust = {}
        self._stage = 0
        self._total_stages = 0

    def _sanitize_module_name(self, name: str) -> str:
        """Turn a file name into a valid Rust module name."""
        # Drop the extension, then normalize hyphens to underscores.
        name = Path(name).stem.replace('-', '_')
        # Strip a leading test prefix. (The old 'test-' branch was dead:
        # hyphens are already underscores at this point.)
        if name.startswith('test_'):
            name = name[5:]
        return name

    async def _create_build_script(self):
        """Write Output/run.sh, a build-and-test helper script."""
        script_content = '''#!/bin/bash
    set -e
    
    echo "Building Rust project..."
    cd Output/algorithms
    cargo build
    
    echo "Running tests..."
    cargo test -- --nocapture
    
    echo "All done!"
    '''

        script_path = Path("Output") / "run.sh"
        async with aiofiles.open(script_path, 'w') as f:
            await f.write(script_content)

        # Make the script executable.
        os.chmod(script_path, 0o755)
        self.logger.info("Created build script: run.sh")

    async def _convert_source_directory(self, src_dir: Path):
        """Convert a source directory sequentially (headers analyzed first)."""
        if not src_dir.exists():
            self.logger.warning(f"Source directory not found: {src_dir}")
            return

        source_files = []
        for ext in ['*.c', '*.h']:
            source_files.extend(src_dir.glob(ext))

        self.logger.info(f"Found {len(source_files)} source files")
        self._total_stages = len(source_files)

        # Pass 1: analyze every header so conversions can use the results.
        header_analyses = {}
        for file_path in source_files:
            if file_path.suffix == '.h':
                self.logger.info(f"Analyzing header: {file_path}")
                analysis = await self.source_analyzer.analyze_header(str(file_path), self.ai_model)
                header_analyses[str(file_path)] = analysis

        # Pass 2: convert the .c files, pairing each with its header analysis.
        for file_path in source_files:
            if file_path.suffix == '.c':
                self._stage += 1
                self.logger.info(f"[{self._stage}/{self._total_stages}] Converting: {file_path}")

                header_path = file_path.with_suffix('.h')
                header_analysis = header_analyses.get(str(header_path))

                try:
                    await self.convert_file(str(file_path), header_analysis)
                except Exception as e:
                    self.logger.error(f"Failed to convert {file_path}: {str(e)}")

    async def _convert_test_directory(self, test_dir: Path):
        """Convert all test-*.c files in `test_dir` concurrently."""
        if not test_dir.exists():
            self.logger.warning(f"Test directory not found: {test_dir}")
            return

        # Framework/support files that must not be converted.
        skip_files = {"test-alloc-testing.c", "framework.c"}

        test_files = []
        for path in test_dir.glob("test-*.c"):
            if path.name not in skip_files:
                test_files.append(path)

        self.logger.info(f"Found {len(test_files)} test files")

        # Convert test files in parallel; gather with return_exceptions so
        # one failure does not abort the rest.
        conversion_tasks = []
        for test_file in test_files:
            self.logger.info(f"Converting test: {test_file}")
            task = self._convert_test_file(test_file)
            conversion_tasks.append(task)

        results = await asyncio.gather(*conversion_tasks, return_exceptions=True)

        for test_file, result in zip(test_files, results):
            if isinstance(result, Exception):
                self.logger.error(f"Failed to convert test {test_file}: {str(result)}")
            else:
                self.logger.info(f"Successfully converted test: {test_file}")

    async def _convert_test_file(self, test_file: Path):
        """Convert a single test file and write it into tests/."""
        cpp_code = await self.file_handler.read_cpp_file(str(test_file))

        # Fix: this previously read self.converted_classes_rust, which this
        # class never defines (only converted_structs_rust exists), so every
        # test conversion raised AttributeError.
        conversion_result = await self.thought_chain.process(
            cpp_code,
            self.converted_structs_rust,
            is_test=True
        )

        output_path = self.output_dir / "tests" / test_file.with_suffix('.rs').name
        async with aiofiles.open(output_path, 'w') as f:
            await f.write(conversion_result.final_code)

        self.logger.info(f"Successfully converted test: {test_file}")

    async def convert_project(self, input_dir: str) -> None:
        """Convert every source file under `input_dir` (headers analyzed first,
        then all conversions run concurrently)."""
        try:
            self.logger.info("=== Starting Project Conversion ===")
            source_files = await self.find_source_files(input_dir)

            if not source_files:
                self.logger.warning(f"No source files found in {input_dir}")
                return

            self.logger.info(f"Found {len(source_files)} source files")
            self._total_stages = len(source_files)

            # Analyze all headers in parallel. Only pairs that actually have
            # a header spawn a task, and results are zipped against that same
            # filtered list — zipping against *all* source files (as before)
            # misassigned analyses whenever any file lacked a header.
            self.logger.info("=== Analyzing Header Files ===")
            headered_files = [(s, h) for s, h in source_files if h]
            header_tasks = []
            for source_file, header_file in headered_files:
                self.logger.info(f"Analyzing header: {header_file}")
                header_tasks.append(self.source_analyzer.analyze_header(header_file, self.ai_model))
            header_results = await asyncio.gather(*header_tasks, return_exceptions=True)

            header_analyses = {}
            for (source_file, header_file), analysis in zip(headered_files, header_results):
                if isinstance(analysis, Exception):
                    self.logger.error(f"Failed to analyze header {header_file}: {str(analysis)}")
                else:
                    header_analyses[header_file] = analysis

            # Convert all source files in parallel.
            self.logger.info("=== Converting Source Files ===")
            conversion_tasks = []
            for source_file, header_file in source_files:
                self._stage += 1
                self.logger.info(f"[{self._stage}/{self._total_stages}] Converting: {source_file}")
                header_analysis = header_analyses.get(header_file) if header_file else None
                conversion_tasks.append(self.convert_file(source_file, header_analysis))

            results = await asyncio.gather(*conversion_tasks, return_exceptions=True)

            # Tally the outcome per file.
            success_count = 0
            for (source_file, _), result in zip(source_files, results):
                if isinstance(result, Exception):
                    self.logger.error(f"Failed to convert {source_file}: {str(result)}")
                else:
                    success_count += 1

            self.logger.info("=== Conversion Completed ===")
            self.logger.info(f"Successfully converted {success_count}/{len(source_files)} files")

        except Exception as e:
            self.logger.error(f"Project conversion error: {str(e)}")
            raise

    async def find_source_files(self, directory: str) -> List[Tuple[str, Optional[str]]]:
        """Recursively find source files and pair each with its header.

        Returns:
            List of (source_path, header_path_or_None) tuples.
        """
        source_files = []
        for ext in ['*.c', '*.cpp', '*.cxx', '*.cc']:
            pattern = os.path.join(directory, '**', ext)
            for file_path in glob.glob(pattern, recursive=True):
                # Look for a sibling header with any common extension.
                header = None
                base_path = os.path.splitext(file_path)[0]
                for h_ext in ['.h', '.hpp', '.hxx', '.hh']:
                    h_path = base_path + h_ext
                    if os.path.exists(h_path):
                        header = h_path
                        break
                source_files.append((file_path, header))
        return source_files

    async def convert_file(self, cpp_file: str, header_analysis: Optional[HeaderAnalysis] = None) -> str:
        """Convert one file to Rust; on rustc errors, update the knowledge
        base and run a targeted retry.

        Returns:
            The final Rust source text.

        Raises:
            ConversionError: when every attempt fails.
        """
        max_retries = 1  # one initial conversion plus one full retry
        attempt = 0
        last_error = None

        while attempt <= max_retries:
            try:
                self.logger.info(f"Processing file: {cpp_file} (Attempt {attempt + 1}/{max_retries + 1})")

                cpp_code = await self.file_handler.read_cpp_file(cpp_file)
                is_test = "test" in Path(cpp_file).name.lower()

                if header_analysis:
                    self.logger.info("Using header file analysis for conversion")
                    # NOTE(review): header_analysis is only logged here, not
                    # fed into the prompts — confirm intended use.

                self.logger.info("Starting conversion process...")
                conversion_result = await self.thought_chain.process(
                    cpp_code,
                    self.converted_structs_rust,
                    is_test=is_test
                )

                # Write the converted code to the proper output directory.
                output_dir = self.tests_output_dir if is_test else self.src_output_dir
                await self.file_handler.write_rust_file(
                    cpp_file,
                    conversion_result.final_code,
                    output_dir,
                    is_test=is_test
                )
                self.logger.info(f"Successfully wrote Rust code to {output_dir / Path(cpp_file).with_suffix('.rs').name}")

                self.logger.info(f"Successfully converted: {cpp_file} (Attempt {attempt + 1})")

                # Compile-check the result with rustc.
                rust_file_name = Path(cpp_file).with_suffix('.rs').name.replace('-', '_')
                rust_file_path = output_dir / rust_file_name

                compile_errors = await self.run_rustc(rust_file_path)
                if compile_errors is None:
                    self.logger.info(f"Compilation successful for {rust_file_path}")
                    return conversion_result.final_code

                self.logger.info(f"Compilation errors found in {rust_file_path}, updating knowledge base and retrying...")
                last_error = compile_errors
                await self.handle_compile_errors(cpp_file, compile_errors, is_test)
                # Targeted retry against the updated knowledge base. It
                # raises on failure; on success the corrected file is on
                # disk, so return it directly — the old `continue` re-ran
                # the whole thought chain and overwrote the corrected file.
                await self.retry_conversion(cpp_file, is_test, errors=compile_errors)
                return self._read_rust_code(rust_file_path)

            except Exception as e:
                self.logger.error(f"Error converting file {cpp_file} on attempt {attempt + 1}: {str(e)}")
                last_error = str(e)
                attempt += 1

        raise ConversionError(f"Failed to convert {cpp_file} after {max_retries + 1} attempts. Last error: {last_error}")

    async def run_rustc(self, rust_file_path: Path) -> Optional[str]:
        """Compile-check a Rust file; return the error text, or None if clean.

        NOTE(review): failures to *launch* rustc (e.g. missing binary) also
        return None — conversion proceeds without verification in that case.
        Confirm that best-effort behavior is intended.
        """
        try:
            self.logger.info(f"Running rustc on {rust_file_path}")
            process = await asyncio.create_subprocess_exec(
                self.config.rustc_path, '--emit', 'metadata', '--crate-type', 'lib', str(rust_file_path),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            stdout, stderr = await process.communicate()
            if process.returncode != 0:
                error_message = stderr.decode()
                self.logger.error(f"rustc 编译错误: {error_message}")
                return error_message
            self.logger.info(f"rustc 检查通过: {rust_file_path}")
            return None
        except Exception as e:
            self.logger.error(f"运行 rustc 失败: {str(e)}")
            return None

    async def retry_conversion(self, cpp_file: str, is_test: bool, errors: Optional[str] = None) -> None:
        """Regenerate the Rust file from the verify_result prompt (no full
        thought chain), guided by the per-file knowledge document.

        Args:
            cpp_file: path to the original C/C++ source.
            is_test: selects tests/ vs src/ output directory.
            errors: rustc output from the failed compile; a placeholder is
                used when omitted (backward-compatible addition).

        Raises:
            ConversionError: if the corrected code still fails rustc.
            FileError: if the knowledge document is missing.
        """
        try:
            self.logger.info(f"Retrying conversion for file: {cpp_file}")

            # Knowledge document name, e.g. binary_heap for binary-heap.c.
            file_stem = Path(cpp_file).stem.replace('-', '_')
            knowledge_doc_name = file_stem

            knowledge_doc_path = self.knowledge_base.base_path / "api" / f"{knowledge_doc_name}.json"
            self.logger.info(f"Loading knowledge document: {knowledge_doc_path}")
            if not knowledge_doc_path.exists():
                self.logger.error(f"Knowledge document not found: {knowledge_doc_path}")
                raise FileError(f"Knowledge document not found: {knowledge_doc_path}")

            with open(knowledge_doc_path, 'r', encoding='utf-8') as f:
                knowledge_data = json.load(f)
            self.logger.debug(f"Loaded knowledge document content: {knowledge_data['content']}")

            # Locate the previously-written Rust output.
            output_dir = self.tests_output_dir if is_test else self.src_output_dir
            rust_file_path = output_dir / f"{knowledge_doc_name}.rs"

            self.logger.info(f"Reading Rust code from: {rust_file_path}")
            rust_code = self._read_rust_code(rust_file_path)
            if not rust_code:
                self.logger.error(f"Rust code is empty or could not be read from: {rust_file_path}")
                raise ConversionError("Rust code is empty or could not be read")

            # Pass the real C source (not the path, as before) and the
            # actual rustc errors. get_prompt escapes braces itself, so
            # pre-escaping here would double-escape the payloads.
            cpp_code_content = await self.file_handler.read_cpp_file(cpp_file)

            prompt = self.thought_chain.prompt_manager.get_prompt(
                "verify_result",
                rust_code=rust_code,
                cpp_code=cpp_code_content,
                rustc_errors=errors if errors is not None else "compile_errors_placeholder"
            )
            self.logger.debug(f"Retry conversion prompt: {prompt}")

            corrected_code = await self.thought_chain.ai_model.generate(prompt)
            self.logger.info(f"Generated corrected Rust code for {cpp_file} based on knowledge document")

            corrected_code = corrected_code.strip()
            self.logger.debug(f"Corrected Rust code: {corrected_code}")

            await self.file_handler.write_rust_file(
                cpp_file,
                corrected_code,
                output_dir,
                is_test=is_test
            )
            self.logger.info(f"Successfully wrote corrected Rust code to {rust_file_path}")

            # Verify the corrected code compiles.
            compile_errors = await self.run_rustc(rust_file_path)
            if compile_errors:
                self.logger.error(f"Retry conversion still has compilation errors in {rust_file_path}")
                raise ConversionError(f"Retry conversion failed with errors: {compile_errors}")
            else:
                self.logger.info(f"Retry conversion successful, no compilation errors in {rust_file_path}")

        except ConversionError as ce:
            self.logger.error(f"转换错误: {str(ce)}")
            raise
        except Exception as e:
            self.logger.error(f"Retrying conversion failed for {cpp_file}: {str(e)}")
            raise

    async def handle_compile_errors(self, cpp_file: str, errors: str, is_test: bool):
        """Ask the AI for an error-resolution document and fold it into the
        knowledge base (and the RAG store, when one is configured).

        Raises:
            ConversionError: when the AI reply is not the expected JSON.
        """
        try:
            file_stem = Path(cpp_file).stem.replace('-', '_')
            knowledge_doc_name = file_stem

            # Load the per-file error-resolution document, or start empty.
            knowledge_doc_path = self.knowledge_base.base_path / "error_resolution" / f"{knowledge_doc_name}.json"
            self.logger.info(f"Loading knowledge document: {knowledge_doc_path}")

            if knowledge_doc_path.exists():
                with open(knowledge_doc_path, 'r', encoding='utf-8') as f:
                    knowledge_data = json.load(f)
                self.logger.debug(f"Loaded knowledge document content: {knowledge_data['content']}")
            else:
                knowledge_data = {'content': ''}
                self.logger.info(f"Knowledge document does not exist, initializing empty content for: {knowledge_doc_path}")

            # Locate the failing Rust output.
            output_dir = self.tests_output_dir if is_test else self.src_output_dir
            rust_file_path = output_dir / f"{knowledge_doc_name}.rs"

            self.logger.info(f"Reading Rust code from: {rust_file_path}")
            rust_code = self._read_rust_code(rust_file_path)
            if not rust_code:
                self.logger.error(f"Rust code is empty or could not be read from: {rust_file_path}")
                raise ConversionError("Rust code is empty or could not be read")

            cpp_code_content = await self.file_handler.read_cpp_file(cpp_file)

            # get_prompt escapes braces in these payloads itself; passing
            # pre-escaped text (as before) double-escaped the code.
            prompt = self.thought_chain.prompt_manager.get_prompt(
                "verify_result",
                rust_code=rust_code,
                cpp_code=cpp_code_content,
                rustc_errors=errors
            )
            self.logger.debug(f"Handle compile errors prompt: {prompt}")

            improvement = await self.thought_chain.ai_model.generate(prompt)
            self.logger.info(f"Received improvement from AI for {cpp_file}")

            # Strip a possible ```json fence, then parse.
            try:
                improvement = re.sub(r'^```json\s*', '', improvement)
                improvement = re.sub(r'```$', '', improvement).strip()
                improvement_data = json.loads(improvement)
                self.logger.debug(f"Improvement data: {improvement_data}")
            except json.JSONDecodeError:
                self.logger.error(f"AI 生成的内容无法解析为 JSON: {improvement}")
                raise ConversionError("AI 生成的内容格式不正确")

            # Both top-level fields are required by the prompt contract.
            if 'content' not in improvement_data or 'metadata' not in improvement_data:
                self.logger.error("AI 生成的内容缺少必要的字段")
                raise ConversionError("AI 生成的内容格式不正确")

            self.knowledge_base.update_specific_document(
                doc_type=improvement_data['metadata']['category'],
                file_name=improvement_data['metadata']['subcategory'],
                new_content=improvement_data
            )
            self.logger.info(f"Updated knowledge document: {knowledge_doc_name}.json")

            # Refresh the RAG store; it is None when the knowledge base
            # started out empty, so guard against AttributeError here.
            if self.rag_store is not None:
                documents = self.knowledge_base.load_documents()
                self.rag_store.add_documents(documents)
                self.logger.debug("Reloaded documents into RAGStore")

        except ConversionError as ce:
            self.logger.error(f"转换错误: {str(ce)}")
            raise
        except Exception as e:
            self.logger.error(f"处理编译错误时出错: {str(e)}")
            raise

    def _read_rust_code(self, rust_file_path: Path) -> str:
        """Read a Rust file's content; return "" (and log) on any failure."""
        try:
            with open(rust_file_path, 'r', encoding='utf-8') as f:
                return f.read()
        except Exception as e:
            self.logger.error(f"读取 Rust 文件失败: {rust_file_path}, 错误: {str(e)}")
            return ""

async def async_main():
    """Load configuration, validate the input layout, and convert the project.

    Any failure is printed and converted into exit code 1.
    """
    try:
        # 1. Load configuration and build the converter.
        config = Config.from_yaml('config.yaml')
        setup_logging(config)
        converter = CodeConverter(config)

        # 2. Resolve the input directory: relative to Tool/src when run from
        #    there, otherwise relative to the repository root. (The old
        #    `output_root` computed here was never used and is removed.)
        input_root = Path("../../Input") if Path.cwd().name == "src" else Path("Input")
        src_dir = input_root / "src"

        if not input_root.exists():
            raise FileError(f"Input directory not found at {input_root}")
        if not src_dir.exists():
            raise FileError(f"Source directory not found at {src_dir}")

        # Bail out early when there is nothing to convert.
        source_files = list(src_dir.glob("*.[ch]"))
        if not source_files:
            raise FileError(f"No source files found in {src_dir}")

        # 3. Convert the whole project.
        await converter.convert_project(str(input_root))

        print("Conversion completed!")

    except Exception as e:
        print(f"Error: {str(e)}")
        sys.exit(1)

def main():
    """Program entry point: pick an event-loop policy, then drive async_main."""
    on_windows = sys.platform == "win32"
    if on_windows:
        # The selector policy avoids Proactor-loop incompatibilities on Windows.
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    asyncio.run(async_main())

# Run the converter only when this module is executed as a script.
if __name__ == "__main__":
    main()
