import logging

import torch
from transformers import (
    BertForSequenceClassification,
    BertForTokenClassification,
    BertForQuestionAnswering,
    AutoConfig
)
from .utils import get_quantization_config


def load_bert_model(
    model_name_or_path,
    task_type="classification",
    num_labels=2,
    use_quantization=False,
    from_scratch=False
):
    """
    Load a BERT model with a head matching the requested task type.

    Args:
        model_name_or_path: Pretrained model name or local path.
        task_type: One of "classification", "ner", "qa".
        num_labels: Number of labels (classification / NER tasks).
        use_quantization: Whether to enable 4-bit quantization
            (config comes from ``get_quantization_config``).
        from_scratch: If True, initialize weights from the config only
            (no pretrained weights are loaded).

    Returns:
        The initialized BERT model.

    Raises:
        ValueError: If ``task_type`` is not supported.
        RuntimeError: If model construction/loading fails (original
            exception attached as ``__cause__``).
    """
    # Map each supported task to its transformers model class.
    task_model_map = {
        "classification": BertForSequenceClassification,
        "ner": BertForTokenClassification,
        "qa": BertForQuestionAnswering
    }

    if task_type not in task_model_map:
        raise ValueError(f"不支持的任务类型: {task_type}，可选类型: {list(task_model_map.keys())}")

    quantization_config = get_quantization_config() if use_quantization else None

    model_class = task_model_map[task_type]

    try:
        if from_scratch:
            # from_pretrained-style quantization cannot apply to randomly
            # initialized weights, so warn instead of silently ignoring it.
            if quantization_config is not None:
                logging.warning(
                    "use_quantization is ignored when from_scratch=True"
                )
            logging.info(
                "Initializing %s from config (no pretrained weights)",
                model_class.__name__,
            )
            config = AutoConfig.from_pretrained(
                model_name_or_path,
                num_labels=num_labels
            )
            model = model_class(config=config)
        else:
            logging.info(
                "Loading pretrained weights for %s from %s",
                model_class.__name__,
                model_name_or_path,
            )
            model = model_class.from_pretrained(
                model_name_or_path,
                num_labels=num_labels,
                quantization_config=quantization_config,
                # Allow the task head size to differ from the checkpoint's.
                ignore_mismatched_sizes=True
            )
        return model
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise RuntimeError(f"模型加载失败: {str(e)}") from e


def freeze_bert_layers(model, freeze_layers=10):
    """Freeze the lower part of a BERT backbone for transfer learning.

    The embedding layer is always frozen; the bottom ``freeze_layers``
    transformer layers are frozen as well (capped at the actual depth).

    Args:
        model: A model exposing a ``bert`` submodule (embeddings + encoder).
        freeze_layers: Number of encoder layers to freeze from the bottom.

    Returns:
        The same model instance, with selected parameters frozen in place.
    """
    encoder_layers = model.bert.encoder.layer
    # Never freeze more layers than the encoder actually has.
    n_frozen = min(freeze_layers, len(encoder_layers))

    modules_to_freeze = [model.bert.embeddings, *encoder_layers[:n_frozen]]
    for module in modules_to_freeze:
        for weight in module.parameters():
            weight.requires_grad = False

    return model


class BertForMultiLabelClassification(BertForSequenceClassification):
    """Multi-label classification model on top of BERT.

    Sets ``problem_type`` so the parent class computes a BCE-with-logits
    loss when labels are provided, and applies sigmoid to the returned
    logits so each label gets an independent probability.
    """

    def __init__(self, config):
        super().__init__(config)
        # Makes BertForSequenceClassification pick BCEWithLogitsLoss.
        self.config.problem_type = "multi_label_classification"

    def forward(self, *args, **kwargs):
        # Bug fix: also accept positional arguments (e.g. model(input_ids));
        # the original **kwargs-only signature rejected positional calls.
        outputs = super().forward(*args, **kwargs)
        # NOTE: any loss is computed by the parent on the raw logits;
        # sigmoid only transforms the logits returned to the caller.
        outputs.logits = torch.sigmoid(outputs.logits)
        return outputs


def save_model(model, save_path, tokenizer=None):
    """Write the model — and the tokenizer, when given — to ``save_path``."""
    artifacts = [model, tokenizer] if tokenizer else [model]
    for artifact in artifacts:
        artifact.save_pretrained(save_path)


def move_model_to_device(model, device=None):
    """Move the model to ``device``; auto-select CUDA when available, else CPU."""
    if device is not None:
        return model.to(device)
    fallback = "cuda" if torch.cuda.is_available() else "cpu"
    return model.to(torch.device(fallback))


def count_model_parameters(model, trainable_only=True):
    """Return the model's total parameter count.

    Args:
        model: Any ``torch.nn.Module``-like object with ``parameters()``.
        trainable_only: When True (default), count only parameters with
            ``requires_grad`` set.

    Returns:
        Total number of scalar parameters as an int.
    """
    params = model.parameters()
    if trainable_only:
        params = (p for p in params if p.requires_grad)
    return sum(p.numel() for p in params)
