"""
FinBERT 模型加载器
支持昇腾910B NPU加速
"""
import os
import torch
import logging
from typing import Optional
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# 条件导入torch_npu
try:
    import torch_npu
    NPU_AVAILABLE = True
except ImportError:
    NPU_AVAILABLE = False

logger = logging.getLogger(__name__)


class FinbertModelLoader:
    """Loader for the FinBERT sequence-classification model and tokenizer.

    Picks the best available device (Ascend NPU > CUDA > CPU) and loads the
    weights either from a local directory or from the HuggingFace Hub,
    preferring safetensors with a fallback to pytorch_model.bin.
    """

    def __init__(self, model_id: str = "ProsusAI/finbert", cache_dir: Optional[str] = None, local_model_path: Optional[str] = None):
        """
        Args:
            model_id: HuggingFace Hub model id, used when no usable local
                path is configured.
            cache_dir: Download/cache directory. Defaults to the
                MODEL_CACHE_DIR env var, then "/app/models".
            local_model_path: Optional directory containing the model files.
                Defaults to the LOCAL_MODEL_PATH env var.
        """
        self.model_id = model_id
        self.cache_dir = cache_dir or os.getenv("MODEL_CACHE_DIR", "/app/models")
        self.local_model_path = local_model_path or os.getenv("LOCAL_MODEL_PATH", None)
        self.device: Optional[str] = None  # set by load_model()
        self.model = None                  # set by load_model()
        self.tokenizer = None              # set by load_model()

    def _get_device(self) -> str:
        """Return the compute device string, preferring NPU, then CUDA, then CPU."""
        if NPU_AVAILABLE and hasattr(torch, 'npu') and torch.npu.is_available():
            device = "npu:0"
            logger.info("使用昇腾NPU加速")
        elif torch.cuda.is_available():
            device = "cuda"
            logger.info("使用CUDA加速")
        else:
            device = "cpu"
            logger.info("使用CPU")
        return device

    def _load_classifier(self, model_path: str, torch_dtype, use_safetensors: bool, local_files_only: bool):
        """Run one from_pretrained attempt with the shared keyword set.

        Extracted so the three load attempts in load_model() do not repeat
        the same argument list.
        """
        return AutoModelForSequenceClassification.from_pretrained(
            model_path,
            torch_dtype=torch_dtype,
            low_cpu_mem_usage=True,
            use_safetensors=use_safetensors,
            cache_dir=self.cache_dir,
            trust_remote_code=True,
            local_files_only=local_files_only,
        )

    def load_model(self) -> bool:
        """Load the tokenizer and model and move the model to the chosen device.

        Load order: safetensors first, then pytorch_model.bin; if a local
        copy turns out to be incomplete, fall back to downloading from the
        HuggingFace Hub.

        Returns:
            True on success.

        Raises:
            Exception: re-raises whatever the underlying from_pretrained
                calls raise, after logging the failure.
        """
        try:
            self.device = self._get_device()
            # fp16 on accelerators; CPU keeps fp32 (fp16 CPU inference is
            # slow and poorly supported).
            torch_dtype = torch.float16 if self.device != "cpu" else torch.float32

            # Prefer a local copy when one is configured and exists on disk.
            # bool() normalizes the None case (local_model_path unset).
            use_local = bool(self.local_model_path and os.path.exists(self.local_model_path))
            if use_local:
                logger.info(f"从本地路径加载模型: {self.local_model_path}")
                model_path = self.local_model_path
            else:
                logger.info(f"从HuggingFace加载模型: {self.model_id}")
                model_path = self.model_id

            self.tokenizer = AutoTokenizer.from_pretrained(
                model_path,
                cache_dir=self.cache_dir,
                trust_remote_code=True,
                local_files_only=use_local
            )

            # Attempt 1: safetensors weights.
            try:
                self.model = self._load_classifier(
                    model_path, torch_dtype,
                    use_safetensors=True, local_files_only=use_local,
                )
            except (OSError, ValueError) as e:
                if "safetensors" in str(e).lower() or "no file named" in str(e).lower():
                    logger.warning(f"未找到 safetensors 文件，尝试使用 pytorch_model.bin: {str(e)}")
                    # Attempt 2: legacy pytorch_model.bin weights.
                    try:
                        self.model = self._load_classifier(
                            model_path, torch_dtype,
                            use_safetensors=False, local_files_only=use_local,
                        )
                    except (OSError, ValueError) as e2:
                        if use_local and ("no file named" in str(e2).lower() or "not found" in str(e2).lower()):
                            logger.warning(f"本地模型文件不完整，尝试从 HuggingFace 下载缺失文件: {str(e2)}")
                            # Attempt 3: the local copy is incomplete, so
                            # retry against the Hub model id. (Bug fix: the
                            # previous code retried the local *path*, from
                            # which transformers can never download, so
                            # this fallback could never succeed.)
                            self.model = self._load_classifier(
                                self.model_id, torch_dtype,
                                use_safetensors=False, local_files_only=False,
                            )
                        else:
                            raise
                else:
                    raise

            self.model = self.model.to(self.device)
            self.model.eval()  # inference-only: disables dropout etc.

            logger.info("模型加载成功")
            return True

        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            raise

    def get_model(self):
        """Return the loaded model; raise RuntimeError if load_model() was not called."""
        if self.model is None:
            raise RuntimeError("模型未加载，请先调用load_model()")
        return self.model

    def get_tokenizer(self):
        """Return the loaded tokenizer; raise RuntimeError if load_model() was not called."""
        if self.tokenizer is None:
            raise RuntimeError("Tokenizer未加载，请先调用load_model()")
        return self.tokenizer

    def get_device(self):
        """Return the device string chosen by load_model(), or None before loading."""
        return self.device


