#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Text2Code inference module.

Supports code generation with LoRA adapters and merged models.
"""

import os
import ast
import textwrap
from typing import List, Tuple, Optional

# Configure the Hugging Face mirror endpoint; placed before the transformers
# import below so the mirror setting is in effect when the hub is first used.
os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")
os.environ.setdefault("HF_HUB_BASE_URL", os.environ["HF_ENDPOINT"])
os.environ.setdefault("HUGGINGFACE_HUB_BASE_URL", os.environ["HF_ENDPOINT"])

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    GenerationConfig
)
from peft import PeftModel


# ==================== 配置管理 ====================

class InferenceConfig:
    """Runtime configuration for inference.

    Every setting is read from an environment variable with a sensible
    default: model locations, the inference mode, the target device, and
    the generation limits.
    """

    def __init__(self):
        env = os.getenv

        # Model paths.
        self.base_model_id = env("T2C_BASE_MODEL", "Salesforce/codet5-base")
        self.local_model_path = f"./models/{self.base_model_id.replace('/', '_')}"
        self.lora_path_complex = env("T2C_LORA_PATH_COMPLEX", "model/text2code_lora_complex")
        self.lora_path_simple = env("T2C_LORA_PATH_SIMPLE", "model/text2code_lora_simple")
        self.lora_path_multitask = env("T2C_LORA_PATH_MULTITASK", "model/multitask_lora")
        self.merged_path = env("T2C_MERGED_PATH", "model/text2code_merged")

        # Inference mode: auto|complex|simple|multitask|merged.
        self.infer_mode = env("T2C_INFER_MODE", "auto").lower()

        # Target device: GPU when available, otherwise CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Generation limits (384 new tokens leaves room for detailed bodies).
        self.max_new_tokens = int(env("T2C_MAX_NEW_TOKENS", "384"))
        self.max_source_len = int(env("T2C_MAX_SRC", "512"))

        # Optional task prefix strings prepended to prompts.
        self.use_task_prefix = self._parse_bool("T2C_USE_TASK_PREFIX", False)
        self.prefix_simple = env("T2C_PREFIX_SIMPLE", "[TASK=SIMPLE] ")
        self.prefix_complex = env("T2C_PREFIX_COMPLEX", "[TASK=COMPLEX] ")

    def _parse_bool(self, name: str, default: bool = False) -> bool:
        """Read environment variable *name* as a boolean flag."""
        raw = os.getenv(name)
        if raw is None:
            return default
        return str(raw).strip().lower() in {"1", "true", "yes", "y", "on"}

    def get_base_model_path(self) -> Tuple[str, bool]:
        """Return (model path, local-files-only flag)."""
        if os.path.exists(self.local_model_path):
            return self.local_model_path, True
        return self.base_model_id, False


# Global configuration instance shared by the module-level loader/generator below.
config = InferenceConfig()


# ==================== Few-Shot Examples ====================

# Two worked input/output examples prepended to prompts for "simple" tasks
# (see PromptBuilder.build_prompt) to steer the model toward the expected
# single-function output format.
FEW_SHOTS = """Write a Python function that adds two integers.
Constraints: Output only one complete Python function; no classes or methods; no 'self' or 'cls'; no dunder methods; no comments; assume inputs are valid.

def add(a: int, b: int) -> int:
    return a + b

Write a Python function that returns the maximum number in a list.
Constraints: Output only one complete Python function; no classes or methods; no 'self' or 'cls'; no dunder methods; no comments; assume inputs are valid.

def find_max(nums: list[int]) -> int:
    return max(nums)

"""


# ==================== 模型加载 ====================

class ModelLoader:
    """Model loading manager.

    Resolution order in load():
      1. merged model directory (only in 'merged' or 'auto' mode),
      2. base model + requested / auto-detected LoRA adapter(s),
      3. plain base model as a last resort.
    """

    def __init__(self, config: InferenceConfig):
        self.config = config
        self.tokenizer = None
        self.model = None
        self.model_type = None  # merged|multi|multitask|complex|simple|base

    def _path_exists(self, path: str) -> bool:
        """Check if path exists and is a directory"""
        return path and os.path.exists(path) and os.path.isdir(path)

    def load(self):
        """Load model and tokenizer"""
        print("[INFO] Loading model...")

        base_path, local_only = self.config.get_base_model_path()

        # 1. Merged model — honored only when the mode allows it.
        #    (Bug fix: previously an existing merged directory took precedence
        #    even when T2C_INFER_MODE explicitly requested complex/simple/
        #    multitask, silently ignoring the user's choice.)
        if self.config.infer_mode in ("merged", "auto") and self._path_exists(self.config.merged_path):
            print(f"[INFO] Using merged model: {self.config.merged_path}")
            self.tokenizer = AutoTokenizer.from_pretrained(self.config.merged_path)
            self.model = AutoModelForSeq2SeqLM.from_pretrained(self.config.merged_path)
            self.model_type = "merged"
            self._finalize_model()
            return

        # 2. Load base model
        print(f"[INFO] Loading base model: {base_path}")
        self.tokenizer = AutoTokenizer.from_pretrained(base_path, local_files_only=local_only)
        base_model = AutoModelForSeq2SeqLM.from_pretrained(base_path, local_files_only=local_only)

        complex_ok = self._path_exists(self.config.lora_path_complex)
        simple_ok = self._path_exists(self.config.lora_path_simple)
        multitask_ok = self._path_exists(self.config.lora_path_multitask)

        # 3. Load LoRA adapter(s) according to the requested mode.
        if self.config.infer_mode == "multitask" and multitask_ok:
            print(f"[INFO] 加载多任务适配器: {self.config.lora_path_multitask}")
            self.model = PeftModel.from_pretrained(base_model, self.config.lora_path_multitask, adapter_name="multitask")
            self.model_type = "multitask"

        elif self.config.infer_mode == "complex" and complex_ok:
            print(f"[INFO] 加载复杂任务适配器: {self.config.lora_path_complex}")
            self.model = PeftModel.from_pretrained(base_model, self.config.lora_path_complex, adapter_name="complex")
            self.model_type = "complex"

        elif self.config.infer_mode == "simple" and simple_ok:
            print(f"[INFO] 加载简单任务适配器: {self.config.lora_path_simple}")
            self.model = PeftModel.from_pretrained(base_model, self.config.lora_path_simple, adapter_name="simple")
            self.model_type = "simple"

        elif complex_ok or simple_ok or multitask_ok:
            # auto mode: priority multitask > complex > simple
            if multitask_ok:
                first_name = "multitask"
                first_path = self.config.lora_path_multitask
            elif complex_ok:
                first_name = "complex"
                first_path = self.config.lora_path_complex
            else:
                first_name = "simple"
                first_path = self.config.lora_path_simple

            print(f"[INFO] 加载适配器: {first_name}")
            self.model = PeftModel.from_pretrained(base_model, first_path, adapter_name=first_name)

            # Best-effort: load the second adapter to enable multi-adapter mode.
            try:
                if first_name == "multitask":
                    # Multitask mode is standalone; no extra adapters needed.
                    self.model_type = "multitask"
                elif first_name == "complex" and simple_ok:
                    self.model.load_adapter(self.config.lora_path_simple, adapter_name="simple")
                    print(f"[INFO] 已加载简单任务适配器")
                    self.model_type = "multi"
                elif first_name == "simple" and complex_ok:
                    self.model.load_adapter(self.config.lora_path_complex, adapter_name="complex")
                    print(f"[INFO] 已加载复杂任务适配器")
                    self.model_type = "multi"
                else:
                    self.model_type = first_name
            except Exception as e:
                print(f"[WARN] 加载额外适配器失败: {e}")
                self.model_type = first_name

        else:
            # No adapters found; fall back to the plain base model.
            print("[WARN] 未找到适配器，使用基础模型 (性能可能较差)")
            self.model = base_model
            self.model_type = "base"

        self._finalize_model()

    def _finalize_model(self):
        """Move the model to the target device and switch to eval mode."""
        self.model = self.model.to(self.config.device)
        self.model.eval()
        print(f"[成功] 模型加载成功 (type={self.model_type}, device={self.config.device})")


# ==================== 代码验证和格式化 ====================

class CodeValidator:
    """Validation and formatting helpers for generated Python code."""

    @staticmethod
    def validate_syntax(code: str) -> bool:
        """Return True when *code* parses as valid Python."""
        try:
            ast.parse(code)
            return True
        except SyntaxError:
            return False

    @staticmethod
    def fix_indentation(code: str) -> str:
        """Normalize indentation by stripping any common leading whitespace."""
        # The previous split("\n")/join round-trip was a no-op: dedent operates
        # on the raw string directly.
        return textwrap.dedent(code).strip()

    @staticmethod
    def format_output(code: str) -> str:
        """Strip markdown code fences and normalize indentation."""
        code = code.replace("```python", "").replace("```", "").strip()
        return CodeValidator.fix_indentation(code)

    @staticmethod
    def is_valid_function(code: str) -> bool:
        """
        Decide whether *code* qualifies as "function code":

        - top-level import / from-import statements are allowed
        - a module docstring is allowed
        - at least one top-level function definition (several related
          functions, e.g. a generator plus its validator, are fine)
        - no top-level class definitions
        - no dunder function names (names starting with "__")
        - no self/cls parameters (plain functions, not methods)
        - the source must parse with the ast module
        """
        s = (code or "").strip()
        # AST validation.
        try:
            tree = ast.parse(s)
        except SyntaxError:
            return False

        # At least one top-level function definition is required.
        top_funcs = [n for n in tree.body if isinstance(n, ast.FunctionDef)]
        if len(top_funcs) < 1:
            return False

        # No top-level class definitions.
        if any(isinstance(n, ast.ClassDef) for n in tree.body):
            return False

        # Top level may only contain imports, a module docstring, and functions.
        for node in tree.body:
            if isinstance(node, ast.FunctionDef):
                continue
            if isinstance(node, (ast.Import, ast.ImportFrom)):
                continue
            # A module docstring appears as an Expr wrapping a str constant.
            if isinstance(node, ast.Expr) and isinstance(getattr(node, "value", None), ast.Constant) and isinstance(node.value.value, str):
                continue
            # Any other top-level statement violates the constraints.
            return False

        # Signature constraints, checked for every top-level function.
        for fn in top_funcs:
            if fn.name.startswith("__"):
                return False
            for arg in list(getattr(fn.args, "args", [])) + list(getattr(fn.args, "posonlyargs", [])) + list(getattr(fn.args, "kwonlyargs", [])):
                if arg.arg.lower() in {"self", "cls"}:
                    return False
        return True

    @staticmethod
    def extract_first_function(text: str) -> str:
        """Extract the LAST top-level function via the AST.

        The last function matches the few-shots-first, answer-last prompt
        layout; the method name is historical and kept for callers.
        """
        src = text or ""
        try:
            tree = ast.parse(src)
        except SyntaxError:
            # Fall back to a plain string search when the source cannot parse.
            idx = src.find("\ndef ")
            if idx == -1:
                idx = src.find("def ")
            return src[idx:].strip() if idx != -1 else src

        # The last top-level function is usually the model's actual answer.
        last_fn = None
        for node in tree.body:
            if isinstance(node, ast.FunctionDef):
                last_fn = node
        if last_fn is None:
            return src
        lines = src.splitlines()
        start = max(last_fn.lineno - 1, 0)
        end = getattr(last_fn, "end_lineno", None)
        if end is None:
            return "\n".join(lines[start:]).strip()
        return "\n".join(lines[start:end]).strip()


# ==================== 提示构建 ====================

class PromptBuilder:
    """Builds model input prompts, optionally with task prefixes, few-shot
    examples, and a reference template."""

    # Common API signature references (kept to reduce parameter mistakes).
    API_HINTS = {
        "rotatingfilehandler": "RotatingFileHandler(filename, mode='a', maxBytes=0, backupCount=0)",
        "requests.get": "requests.get(url, params=None, timeout=None, headers=None)",
        "requests.post": "requests.post(url, data=None, json=None, timeout=None)",
        "pandas.read_csv": "pd.read_csv(filepath, sep=',', header='infer', names=None, usecols=None)",
        "json.loads": "json.loads(s, strict=True)",
        "json.dumps": "json.dumps(obj, indent=None, ensure_ascii=True)",
    }

    def __init__(self, config: InferenceConfig):
        self.config = config

    def build_prompt(self, user_input: str, task_mode: Optional[str] = None, template_guide: Optional[str] = None) -> str:
        """Build the full (English-only) input prompt.

        Args:
            user_input: The user's description.
            task_mode: Task mode (simple/complex/auto).
            template_guide: Optional template example used to guide generation.
        """
        task = (user_input or "").strip()

        # A fully templated request is used as-is.
        if task.startswith("Write a Python function"):
            return self._add_task_prefix(task, task_mode)

        # Concise but explicit instruction block.
        requirement_lines = (
            "- Import necessary modules at the top",
            "- Use correct standard library APIs with proper parameters",
            "- Add type hints (typing module)",
            "- Include a docstring",
            "- Return appropriate value",
        )
        instruction = (
            f"Write a Python function that {task}.\n\nRequirements:\n"
            + "\n".join(requirement_lines)
            + "\n\nOutput ONLY the function code, no explanations."
        )

        # Prepend the reference template, if one was provided.
        if template_guide:
            instruction = (
                "Here is a REFERENCE EXAMPLE showing the expected code quality and structure:\n\n"
                f"```python\n{template_guide}\n```\n\n"
                "Now generate similar high-quality code for this task:\n\n"
                + instruction
            )

        # Apply the optional task prefix.
        instruction = self._add_task_prefix(instruction, task_mode)

        # Few-shot examples only for simple tasks; complex prompts stay clean
        # to avoid the examples leaking into the output.
        if (task_mode or "auto").lower() == "simple":
            return FEW_SHOTS + "\n### Task:\n" + instruction
        return instruction

    def _add_task_prefix(self, text: str, task_mode: Optional[str]) -> str:
        """Prepend the configured task prefix when enabled."""
        if not self.config.use_task_prefix or task_mode not in {"simple", "complex"}:
            return text
        if task_mode == "simple":
            return self.config.prefix_simple + text
        return self.config.prefix_complex + text

    @staticmethod
    def detect_task_mode(prompt: str) -> str:
        """Heuristically classify the prompt as 'simple' or 'complex'."""
        text = (prompt or "").strip().lower()

        simple_markers = (
            "list", "string", "array", "sum", "sort", "maximum", "minimum",
            "fibonacci", "palindrome", "prime", "reverse", "factorial",
        )
        complex_markers = ("api", "database", "http", "file", "logging", "class", "framework")

        is_simple = any(m in text for m in simple_markers)
        is_complex = any(m in text for m in complex_markers)
        if is_simple and not is_complex and len(text) < 220:
            return "simple"
        return "complex"


# ==================== 代码生成器 ====================

class CodeGenerator:
    """Code generator with optional experimental enhancements.

    Wraps the loaded model/tokenizer and produces Python functions from
    natural-language prompts: beam search first, sampling as a fallback,
    with AST-based validation and extraction of the result.
    """
    
    def __init__(self, model_loader: ModelLoader, config: InferenceConfig):
        self.model = model_loader.model
        self.tokenizer = model_loader.tokenizer
        self.model_type = model_loader.model_type
        self.config = config
        self.prompt_builder = PromptBuilder(config)
        self.validator = CodeValidator()
        
        # Enhancement 1: adaptive temperature logits processor (optional;
        # silently disabled if the utils module cannot be imported).
        self.use_adaptive_temp = self._parse_bool("T2C_USE_ADAPTIVE_TEMP", True)
        self.adaptive_temp_processor = None
        if self.use_adaptive_temp:
            try:
                from utils.adaptive_temperature import AdaptiveTemperatureProcessor
                self.adaptive_temp_processor = AdaptiveTemperatureProcessor(
                    self.tokenizer,
                    temp_signature=0.2,
                    temp_body=0.7,
                    temp_expression=0.5,
                    verbose=False
                )
                print("[创新] 自适应温度已启用")
            except Exception as e:
                print(f"[WARN] 加载自适应温度失败: {e}")
        
        # Enhancement 2: import predictor (optional; same best-effort loading).
        self.use_import_predictor = self._parse_bool("T2C_USE_IMPORT_PREDICTOR", True)
        self.import_predictor = None
        if self.use_import_predictor:
            try:
                from utils.import_predictor import ImportPredictor
                self.import_predictor = ImportPredictor()
                print("[创新] 导入预测器已启用")
            except Exception as e:
                print(f"[WARN] 加载导入预测器失败: {e}")
    
    def _parse_bool(self, name: str, default: bool = False) -> bool:
        # Duplicate of InferenceConfig._parse_bool: interpret an env var as a flag.
        v = os.getenv(name)
        if v is None:
            return default
        return str(v).strip().lower() in {"1", "true", "yes", "y", "on"}
    
    def generate(self, prompt: str, mode: Optional[str] = None, template_guide: Optional[str] = None) -> str:
        """
        Generate code, applying the optional enhancements.
        
        Args:
            prompt: Natural-language description.
            mode: Forced mode (simple|complex|auto); defaults to the config value.
            template_guide: Optional template example used to steer generation quality.
        
        Returns:
            Generated Python code (with import statements auto-added when the
            import predictor is available).
        """
        # Resolve the task mode.
        task_mode = (mode or self.config.infer_mode or "auto").lower()
        
        if task_mode == "auto":
            task_mode = self.prompt_builder.detect_task_mode(prompt)
        
        # Switch adapters (when supported).
        self._switch_adapter(task_mode)
        
        # Build the input (passing the template guide through).
        input_text = self.prompt_builder.build_prompt(prompt, task_mode, template_guide)
        
        # Generate code with the stable beam-search strategy first.
        result = self._generate_with_beam_search(input_text)
        
        # Enhancement 2: auto-complete import statements.
        if self.import_predictor:
            result = self.import_predictor.augment_code(result)
        
        # Validate and repair.
        if not self.validator.is_valid_function(result) or not self.validator.validate_syntax(result):
            # Try extracting just the target function from the raw output.
            extracted = self.validator.extract_first_function(result)
            if self.validator.is_valid_function(extracted) and self.validator.validate_syntax(extracted):
                # Re-apply the import predictor to the extracted function.
                if self.import_predictor:
                    extracted = self.import_predictor.augment_code(extracted)
                return extracted
            
            # Retry once with the sampling strategy.
            result = self._generate_with_sampling(input_text)
            
            # Re-apply the import predictor.
            if self.import_predictor:
                result = self.import_predictor.augment_code(result)
            
            extracted = self.validator.extract_first_function(result)
            if self.validator.is_valid_function(extracted) and self.validator.validate_syntax(extracted):
                if self.import_predictor:
                    extracted = self.import_predictor.augment_code(extracted)
                return extracted
        
        # Return the result (with a warning appended when syntax is still broken).
        if not self.validator.validate_syntax(result):
            result += "\n# 警告: 代码语法可能有问题，请手动检查。"
        
        return result
    
    def _switch_adapter(self, task_mode: str):
        """Switch the active LoRA adapter (multi-adapter models only)."""
        if self.model_type != "multi":
            return
        
        if not hasattr(self.model, "set_adapter"):
            return
        
        if task_mode in {"simple", "complex"}:
            try:
                self.model.set_adapter(task_mode)
                print(f"[DEBUG] 已切换到 {task_mode} 适配器")
            except Exception as e:
                print(f"[WARN] 切换适配器失败: {e}")
    
    def _tokenize_input(self, text: str):
        """Tokenize the input text and move tensors to the configured device."""
        return self.tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            max_length=self.config.max_source_len,
        ).to(self.config.device)
    
    def _generate_with_beam_search(self, input_text: str) -> str:
        """Generate with beam search (more stable), honoring enhancements."""
        inputs = self._tokenize_input(input_text)
        
        generation_config = GenerationConfig(
            do_sample=False,
            num_beams=5,  # more beams for higher quality
            max_new_tokens=self.config.max_new_tokens,
            no_repeat_ngram_size=3,  # lowered to 3 to allow natural repetition
            repetition_penalty=1.15,  # mild penalty, avoids over-suppression
            length_penalty=1.0,  # encourage complete implementations
            early_stopping=True,  # stop early to avoid filler output
        )
        
        # Enhancement: attach logits processors when enabled.
        logits_processors = self._get_logits_processors()
        
        with torch.no_grad():
            if logits_processors:
                outputs = self.model.generate(
                    **inputs,
                    generation_config=generation_config,
                    logits_processor=logits_processors
                )
            else:
                outputs = self.model.generate(**inputs, generation_config=generation_config)
        
        result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return self.validator.format_output(result)
    
    def _generate_with_sampling(self, input_text: str) -> str:
        """Generate with a sampling strategy (more diverse fallback)."""
        inputs = self._tokenize_input(input_text)
        
        # Enhancement 1: use adaptive temperature when enabled.
        if self.adaptive_temp_processor and self.use_adaptive_temp:
            generation_config = GenerationConfig(
                do_sample=True,
                temperature=1.0,  # overridden by the adaptive processor
                top_p=0.9,
                num_beams=1,
                max_new_tokens=self.config.max_new_tokens,
                no_repeat_ngram_size=4,
                repetition_penalty=1.25,
            )
            logits_processors = [self.adaptive_temp_processor]
        else:
            generation_config = GenerationConfig(
                do_sample=True,
                temperature=0.4,
                top_p=0.9,
                num_beams=1,
                max_new_tokens=self.config.max_new_tokens,
                no_repeat_ngram_size=4,
                repetition_penalty=1.25,
            )
            logits_processors = None
        
        with torch.no_grad():
            if logits_processors:
                outputs = self.model.generate(
                    **inputs,
                    generation_config=generation_config,
                    logits_processor=logits_processors
                )
            else:
                outputs = self.model.generate(**inputs, generation_config=generation_config)
        
        result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return self.validator.format_output(result)
    
    def _get_logits_processors(self):
        """Return logits processors for generation (enhancement hook)."""
        # Adaptive temperature is not used with beam search (incompatible),
        # so this currently always returns None; other modes may add
        # processors here later.
        return None


# ==================== Global Instance Initialization ====================

# Load the model once at module import time.
model_loader = ModelLoader(config)
model_loader.load()

# Generator built on top of the loaded model.
generator = CodeGenerator(model_loader, config)


# ==================== 公共接口 ====================

def generate_code(prompt: str, mode: Optional[str] = None, force_few_shots: bool = True, max_new_tokens: int = None, template_guide: Optional[str] = None) -> str:
    """Generate Python code from a natural-language description.

    Args:
        prompt: Natural-language description (English).
        mode: Task mode (simple|complex|auto).
        force_few_shots: Whether to include few-shot examples (always True in
            the current implementation — few-shots live in PromptBuilder;
            kept for backward compatibility).
        max_new_tokens: Optional temporary override of the generation limit.
        template_guide: Optional template example used to guide generation.

    Returns:
        The generated Python code.
    """
    if max_new_tokens is None:
        return generator.generate(prompt, mode, template_guide)

    # Temporarily override max_new_tokens, restoring the old value afterwards.
    saved = generator.config.max_new_tokens
    generator.config.max_new_tokens = max_new_tokens
    try:
        return generator.generate(prompt, mode, template_guide)
    finally:
        generator.config.max_new_tokens = saved


def validate_python_code(code: str) -> bool:
    """Validate that the given Python source has correct syntax.

    Args:
        code: Python source code string.

    Returns:
        True when the code parses cleanly, False otherwise.
    """
    try:
        ast.parse(code)
    except SyntaxError:
        return False
    return True


def looks_like_function(code: str) -> bool:
    """Report whether *code* looks like valid standalone function code.

    Delegates to CodeValidator.is_valid_function: at least one top-level
    plain function, no classes, no dunder names, no self/cls parameters.

    Args:
        code: Python source code string.

    Returns:
        True when the code passes the function-shape checks, False otherwise.
    """
    return CodeValidator.is_valid_function(code)


# ==================== 命令行测试接口 ====================

if __name__ == "__main__":
    # Simple interactive REPL: read a description, print generated code,
    # stop when the user types 'exit'.
    print("[INFO] Text-to-Code 推理系统")
    print("输入自然语言描述，按 Enter 生成 Python 代码。输入 'exit' 退出。\n")

    while (query := input(">>> 你的描述: ")).strip().lower() != "exit":
        code = generate_code(query)
        print("\n[生成代码]:\n")
        print(code)
        print("\n" + "=" * 80 + "\n")
