#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
INT8 quantized inference module.

Uses bitsandbytes for efficient 8-bit quantization.
Expected: retains 95-98% of quality with ~4x memory reduction.
"""

import os
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, GenerationConfig
from peft import PeftModel
from typing import Optional

# Route Hugging Face Hub traffic through a mirror endpoint (override via env)
os.environ.setdefault("HF_ENDPOINT", "https://hf-mirror.com")
os.environ.setdefault("HF_HUB_BASE_URL", os.environ["HF_ENDPOINT"])

# ==================== Configuration ====================

class QuantizedInferenceConfig:
    """Inference configuration assembled from environment variables."""

    # Values accepted as "true" when parsing boolean env vars
    _TRUTHY = {"1", "true", "yes", "y", "on"}

    def __init__(self):
        # Model paths
        self.base_model_id = os.getenv("T2C_BASE_MODEL", "Salesforce/codet5-base")
        self.local_model_path = "./models/" + self.base_model_id.replace("/", "_")
        self.lora_path_complex = os.getenv("T2C_LORA_PATH_COMPLEX", "model/text2code_lora_complex")
        self.lora_path_multitask = os.getenv("T2C_LORA_PATH_MULTITASK", "model/multitask_lora")

        # Adapter selection: complex | multitask | auto
        self.model_type = os.getenv("T2C_QUANTIZE_MODEL", "auto").lower()

        # Quantization switches (4-bit is the more aggressive option)
        self.use_8bit = self._parse_bool("T2C_USE_8BIT", True)
        self.use_4bit = self._parse_bool("T2C_USE_4BIT", False)

        # Generation limits
        self.max_new_tokens = int(os.getenv("T2C_MAX_NEW_TOKENS", "256"))
        self.max_source_len = int(os.getenv("T2C_MAX_SRC", "512"))

        # Compute device
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def _parse_bool(self, name: str, default: bool = False) -> bool:
        """Interpret env var *name* as a boolean flag; *default* when unset."""
        raw = os.getenv(name)
        return default if raw is None else raw.strip().lower() in self._TRUTHY

    def get_base_model_path(self):
        """Return the model path, preferring a local snapshot when present."""
        if os.path.exists(self.local_model_path):
            return self.local_model_path
        return self.base_model_id

    def get_lora_path(self):
        """Return (path, kind) of the LoRA adapter selected by model_type."""
        if self.model_type == "auto":
            # Auto-detection: multitask takes precedence over complex
            candidates = (
                (self.lora_path_multitask, "multitask"),
                (self.lora_path_complex, "complex"),
            )
            for path, kind in candidates:
                if os.path.exists(path):
                    return path, kind
            return None, None

        # Explicit selection; unknown values yield (None, None)
        mapping = {
            "multitask": (self.lora_path_multitask, "multitask"),
            "complex": (self.lora_path_complex, "complex"),
        }
        return mapping.get(self.model_type, (None, None))


# Global configuration instance (created once at import time)
config = QuantizedInferenceConfig()


# ==================== Model loader ====================

class QuantizedModelLoader:
    """Loads the base seq2seq model with INT8/INT4 quantization plus an
    optional LoRA adapter.

    Falls back to non-quantized CPU inference when CUDA or bitsandbytes is
    unavailable.
    """

    def __init__(self, config: QuantizedInferenceConfig):
        self.config = config
        self.tokenizer = None           # set by load()/_load_cpu()
        self.model = None               # set by load()/_load_cpu()
        self.quantization_type = None   # "INT8", "INT4" or "None (CPU)"
        self.adapter_type = None        # loaded adapter: "multitask"/"complex"/"base"

    def load(self):
        """Load the quantized model on GPU, or fall back to CPU inference."""
        print("=" * 60)
        print("   INT8 量化模型加载器")
        print("=" * 60)

        if not torch.cuda.is_available():
            print("[WARN] CUDA 不可用。量化需要 GPU。")
            print("[INFO] 降级为非量化 CPU 推理...")
            return self._load_cpu()

        # bitsandbytes is required for 8/4-bit loading
        try:
            import bitsandbytes as bnb
            print(f"[INFO] bitsandbytes 版本: {bnb.__version__}")
        except ImportError:
            print("[错误] bitsandbytes 未安装!")
            print("[INFO] 安装方法: pip install bitsandbytes accelerate")
            print("[INFO] 降级为非量化推理...")
            return self._load_cpu()

        base_path = self.config.get_base_model_path()
        print(f"[INFO] 加载基础模型: {base_path}")

        # Tokenizer first (cheap), then the quantized weights
        self.tokenizer = AutoTokenizer.from_pretrained(base_path)

        # Both quantization modes go through BitsAndBytesConfig; the bare
        # `load_in_8bit=True` kwarg is deprecated in recent transformers.
        from transformers import BitsAndBytesConfig
        if self.config.use_4bit:
            print("[INFO] 使用 INT4 量化加载 (4-bit)...")
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.float16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4"
            )
            self.quantization_type = "INT4"
        else:
            print("[INFO] 使用 INT8 量化加载 (8-bit)...")
            bnb_config = BitsAndBytesConfig(load_in_8bit=True)
            self.quantization_type = "INT8"

        base_model = AutoModelForSeq2SeqLM.from_pretrained(
            base_path,
            quantization_config=bnb_config,
            device_map="auto",
            torch_dtype=torch.float16
        )

        self._attach_adapter(base_model)
        self.model.eval()

        # Report GPU memory usage. BUG FIX: the detail prints previously sat
        # outside this guard, which would raise NameError on `allocated` /
        # `reserved` whenever CUDA was unavailable at this point.
        if torch.cuda.is_available():
            allocated = torch.cuda.memory_allocated() / 1024**3
            reserved = torch.cuda.memory_reserved() / 1024**3
            print(f"\n[内存使用]")
            print(f"  已分配: {allocated:.2f} GB")
            print(f"  已保留: {reserved:.2f} GB")

        print(f"\n[成功] 模型加载成功!")
        print(f"  适配器: {self.adapter_type}")
        print(f"  量化: {self.quantization_type}")
        print("=" * 60)

    def _attach_adapter(self, base_model):
        """Wrap *base_model* with the configured LoRA adapter when one exists.

        Sets self.model and self.adapter_type; falls back to the bare base
        model (adapter_type "base") when no adapter directory is found.
        """
        lora_path, adapter_type = self.config.get_lora_path()
        if lora_path and os.path.exists(lora_path):
            print(f"[INFO] 加载 {adapter_type} LoRA 适配器: {lora_path}")
            self.model = PeftModel.from_pretrained(base_model, lora_path)
            self.adapter_type = adapter_type
        else:
            print(f"[WARN] LoRA 适配器未找到")
            print("[INFO] 仅使用基础模型")
            self.model = base_model
            self.adapter_type = "base"

    def _load_cpu(self):
        """Fallback path: load the non-quantized model on CPU."""
        base_path = self.config.get_base_model_path()
        self.tokenizer = AutoTokenizer.from_pretrained(base_path)
        base_model = AutoModelForSeq2SeqLM.from_pretrained(base_path)

        self._attach_adapter(base_model)
        self.model.eval()

        self.quantization_type = "None (CPU)"
        print(f"[成功] 模型已在 CPU 上加载 (无量化)")
        print(f"[INFO] 使用 {self.adapter_type} 适配器")


# ==================== Code generator ====================

class QuantizedCodeGenerator:
    """Code generator backed by the (possibly quantized) seq2seq model."""

    def __init__(self, model_loader: "QuantizedModelLoader"):
        # Pull the loaded artifacts off the loader so generation is self-contained
        self.model = model_loader.model
        self.tokenizer = model_loader.tokenizer
        self.config = model_loader.config
        self.quantization_type = model_loader.quantization_type
        self.adapter_type = model_loader.adapter_type

    def generate(self, prompt: str, mode: str = "complex") -> str:
        """Generate Python code from a natural-language prompt.

        Args:
            prompt: Natural-language description of the desired function.
            mode: Kept for API compatibility; currently unused.

        Returns:
            Generated code with markdown code fences stripped.
        """
        # Wrap bare descriptions in a simple instruction template
        if not prompt.strip().startswith("Write a Python function"):
            prompt = f"Write a Python function that {prompt}."

        # Tokenize, truncating to the configured source length
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=self.config.max_source_len
        )
        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

        # Deterministic beam search. BUG FIX: removed temperature=0.7 — it is
        # ignored when do_sample=False and recent transformers versions emit a
        # validation warning for it.
        generation_config = GenerationConfig(
            max_new_tokens=self.config.max_new_tokens,
            num_beams=4,
            do_sample=False,
            no_repeat_ngram_size=4,
            repetition_penalty=1.2
        )

        with torch.no_grad():
            outputs = self.model.generate(**inputs, generation_config=generation_config)

        # Decode and strip markdown fences
        code = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return code.replace("```python", "").replace("```", "").strip()


# ==================== Global instances ====================

# Load the model once at import time
print("\n[初始化量化推理系统...]")
model_loader = QuantizedModelLoader(config)
model_loader.load()

# Build the generator on top of the loaded model
generator = QuantizedCodeGenerator(model_loader)


# ==================== Public API ====================

def generate_code(prompt: str, mode: Optional[str] = None) -> str:
    """Generate Python code from a natural-language description.

    Args:
        prompt: Natural-language description of the desired function.
        mode: Task mode (kept for backward compatibility; ignored).

    Returns:
        The generated Python code.
    """
    return generator.generate(prompt, mode if mode else "complex")


# ==================== Command-line interface ====================

if __name__ == "__main__":
    import time

    # Interactive REPL: read a description, print generated code + timing
    banner = "=" * 60
    print("\n" + banner)
    print("   量化文本转代码推理系统")
    print(f"   量化: {generator.quantization_type}")
    print(banner)
    print("\n输入描述 (输入 'exit' 退出):\n")

    while True:
        try:
            query = input(">>> ")
            text = query.strip().lower()
            if text in ("exit", "quit", "q"):
                break
            if not text:
                continue

            # Measure inference latency
            t0 = time.time()
            code = generate_code(query)
            dt = time.time() - t0

            print(f"\n[生成代码] ({dt:.2f}s):\n")
            print(code)
            print("\n" + "-" * 60 + "\n")

        except KeyboardInterrupt:
            break
        except Exception as e:
            print(f"\n[错误] {e}\n")
