#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
BERT模型微调模块
用于基于数据库中的训练数据对BERT模型进行微调
"""

import json
import os
import torch
import logging
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoTokenizer, 
    AutoModelForTokenClassification,
    TrainingArguments, 
    Trainer,
    DataCollatorForTokenClassification
)
from typing import List, Dict, Any, Tuple
from datetime import datetime

# Module logger setup.
# Ensure the log directory exists; exist_ok avoids the race between an
# existence check and the mkdir (the old exists()+makedirs() pattern could
# raise if another process created the directory in between).
log_dir = "logs"
os.makedirs(log_dir, exist_ok=True)

# Dedicated logger for this module.
logger = logging.getLogger('bert_finetuner')
logger.setLevel(logging.INFO)

# Drop any stale handlers left over from a previous import/reload so
# records are not duplicated.
for handler in logger.handlers[:]:
    logger.removeHandler(handler)

# File handler (UTF-8 so Chinese log messages are written correctly).
file_handler = logging.FileHandler(os.path.join(log_dir, "bert_finetuner.log"), encoding='utf-8')
file_handler.setLevel(logging.INFO)

# Console handler.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)

# One shared formatter for both handlers.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)

# Attach handlers to the logger.
logger.addHandler(file_handler)
logger.addHandler(console_handler)

# Keep records out of the root logger (would double the console output).
logger.propagate = False

class NERDataset(Dataset):
    """
    Token-classification (NER) dataset.

    Holds raw texts plus one label id per character; tokenizes each text
    lazily in __getitem__ and aligns the character-level ids to the
    tokenizer's sub-word tokens (padding/special tokens get -100, the
    ignore index).

    NOTE(review): __getitem__ relies on encoding.word_ids(), which exists
    only on "fast" (Rust-backed) tokenizers — confirm the tokenizer passed
    in is a fast one.
    """
    
    def __init__(self, texts: List[str], labels: List[List[int]], tokenizer, label_list: List[str], max_length: int = 512):
        """
        Initialize the dataset and sanitize the labels in place.
        
        Args:
            texts: Input texts, one string per sample.
            labels: One label-id sequence per text; presumably one id per
                character of the text (produced by BERTFineTuner.prepare_data
                — verify against the caller).
            tokenizer: Tokenizer used to encode texts in __getitem__.
            label_list: Ordered label names; len(label_list) defines the
                valid id range [0, num_labels).
            max_length: Maximum encoded sequence length; longer inputs are
                truncated, shorter ones padded.
        """
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.label_list = label_list
        self.num_labels = len(label_list)
        self.max_length = max_length
        
        logger.info(f"NERDataset初始化:")
        logger.info(f"  文本数量: {len(texts)}")
        logger.info(f"  标签数量: {len(labels)}")
        logger.info(f"  标签列表: {label_list}")
        logger.info(f"  标签数量: {self.num_labels}")
        
        # Sanitize labels once up front (mutates self.labels in place).
        self._validate_labels()
    
    def _validate_labels(self):
        """
        Coerce every label id to int and clamp it into [0, num_labels);
        -100 (the ignore index) is left untouched. Mutates self.labels
        in place and logs how many ids had to be corrected.
        """
        logger.info(f"验证标签，标签总数: {self.num_labels}")
        invalid_count = 0
        for i, label_seq in enumerate(self.labels):
            for j, label_id in enumerate(label_seq):
                # Coerce non-int ids; fall back to 0 ('O') if uncoercible.
                if not isinstance(label_id, int):
                    try:
                        label_id = int(label_id)
                        self.labels[i][j] = label_id
                    except (ValueError, TypeError):
                        self.labels[i][j] = 0  # default to the 'O' label
                
                # Clamp out-of-range ids (skip the -100 ignore index).
                if label_id != -100:
                    if label_id < 0:
                        self.labels[i][j] = 0
                        invalid_count += 1
                    elif label_id >= self.num_labels:
                        self.labels[i][j] = self.num_labels - 1
                        invalid_count += 1
        
        if invalid_count > 0:
            logger.info(f"发现并修正了 {invalid_count} 个超出范围的标签")
    
    def __len__(self):
        # Number of samples.
        return len(self.texts)
    
    def __getitem__(self, idx):
        """
        Encode sample `idx`.

        Returns a dict with 'input_ids', 'attention_mask' (both flattened
        to 1-D) and 'labels' (a LongTensor aligned to the tokens, with
        -100 on special/padding positions).
        """
        text = self.texts[idx]
        labels = self.labels[idx]
        
        # Tokenize with fixed-length padding/truncation.
        encoding = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors='pt'
        )
        
        # Map the character-level ids onto the sub-word tokens.
        encoded_labels = self._align_labels_to_tokens(encoding, labels)
        
        # Defensive re-clamp of every id into [0, num_labels).
        for i, label_id in enumerate(encoded_labels):
            if label_id != -100:
                # Force int and clamp into the valid range.
                label_id = int(label_id)
                if label_id < 0:
                    encoded_labels[i] = 0
                elif label_id >= self.num_labels:
                    encoded_labels[i] = self.num_labels - 1
                else:
                    encoded_labels[i] = label_id
        
        # Convert to a tensor for the collator/model.
        labels_tensor = torch.tensor(encoded_labels, dtype=torch.long)
        
        # Final validation: zero out anything still out of range.
        unique_labels = torch.unique(labels_tensor)
        for label in unique_labels:
            label_val = label.item()
            if label_val != -100 and (label_val < 0 or label_val >= self.num_labels):
                logger.error(f"标签验证失败: 索引 {label_val} 超出范围 [0, {self.num_labels})")
                # Force-correct all offending positions to 'O'.
                labels_tensor[labels_tensor == label_val] = 0
        
        # Debug output, emitted for the first sample only.
        if idx == 0:  # only log debug info for the first sample
            logger.debug(f"样本 {idx} 调试信息:")
            logger.debug(f"  文本: {text[:50]}...")
            logger.debug(f"  原始标签长度: {len(labels)}")
            logger.debug(f"  编码后标签长度: {len(encoded_labels)}")
            logger.debug(f"  标签张量形状: {labels_tensor.shape}")
            unique_labels = torch.unique(labels_tensor)
            logger.debug(f"  唯一标签: {unique_labels.tolist()}")
            logger.debug(f"  标签列表: {self.label_list}")
            logger.debug(f"  标签数量: {self.num_labels}")
        
        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': labels_tensor
        }
    
    def _align_labels_to_tokens(self, encoding, labels):
        """
        Align character-level label ids to the tokenized sequence.

        Special/padding tokens (word_ids() is None) get -100; the first
        token of each word takes that word's label; continuation sub-words
        repeat the previous token's label.

        NOTE(review): word_ids() here indexes *words*, while `labels` was
        built per *character* — for Chinese text most tokenizers treat
        each character as a word, so the indices line up; verify for
        other languages/tokenizers.
        """
        word_ids = encoding.word_ids()
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            if word_idx is None:
                label_ids.append(-100)
            elif word_idx != previous_word_idx:
                # First token of a new word: take that word's label if the
                # index is inside the label sequence.
                if word_idx < len(labels):
                    label_id = labels[word_idx]
                    # Coerce to int; fall back to 0 ('O') on failure.
                    if not isinstance(label_id, int):
                        try:
                            label_id = int(label_id)
                        except (ValueError, TypeError):
                            label_id = 0
                    label_ids.append(label_id)
                else:
                    label_ids.append(0)  # use the 'O' label
            else:
                # Continuation sub-word: repeat the previous token's label.
                if label_ids:  # guard against an empty list
                    label_ids.append(label_ids[-1])
                else:
                    label_ids.append(0)
            previous_word_idx = word_idx
        return label_ids

class BERTFineTuner:
    """
    Fine-tunes a BERT token-classification (NER) model.

    Typical usage:
        tuner = BERTFineTuner()
        tuner.fine_tune(train_data, output_dir="./fine_tuned_model")
        tuner.load_fine_tuned_model("./fine_tuned_model")
    """
    
    def __init__(self, model_name: str = "chinese-bert-wwm-ext", 
                 model_path: str = "/home/aresen/1project/2python/hub/models/chinese-bert-wwm-ext"):
        """
        Initialize the fine-tuner and load the tokenizer.

        The tokenizer is loaded from the local path first; on failure it
        falls back to downloading by model name.

        Args:
            model_name: Hub model name used as the online fallback.
            model_path: Local directory holding the pretrained model.

        Raises:
            Exception: re-raised if the tokenizer cannot be loaded either
                locally or online.
        """
        self.model_name = model_name
        self.model_path = model_path
        self.tokenizer = None
        self.model = None
        self.label_list = []
        self.label_to_id = {}
        self.id_to_label = {}
        
        # Prefer the local copy; fall back to the hub if it is missing.
        try:
            logger.info(f"尝试从本地路径加载分词器: {self.model_path}")
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_path, local_files_only=True)
            logger.info("成功从本地路径加载分词器")
        except Exception as e:
            logger.warning(f"从本地路径加载分词器失败: {e}")
            try:
                logger.info(f"尝试从在线加载分词器: {self.model_name}")
                self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
                logger.info("成功从在线加载分词器")
            except Exception as e:
                logger.error(f"加载分词器失败: {e}")
                raise
    
    def prepare_data(self, train_data: List[Dict[str, Any]]) -> Tuple[List[str], List[List[int]]]:
        """
        Build texts and per-character label-id sequences from DB records.

        Each usable record carries an 'entity_info' dict with 'text' and
        'entities' (a list of {'entityKey', 'entityValue'} dicts). Pass 1
        collects the label vocabulary (plus the 'O' tag) and populates
        label_list / label_to_id / id_to_label; pass 2 tags every character
        covered by the first occurrence of each entity value with that
        entity's label id. Records whose entities cannot be located in the
        text are skipped.

        Args:
            train_data: Training records fetched from the database.

        Returns:
            (texts, labels): parallel lists where labels[i] holds one
            label id per character of texts[i].
        """
        texts = []
        labels = []
        
        # Pass 1: collect the label vocabulary.
        all_labels = set()
        valid_records = 0
        
        logger.info(f"开始处理 {len(train_data)} 条训练数据")
        
        for i, data in enumerate(train_data):
            # Skip records without entity annotations.
            if not data.get('entity_info'):
                logger.debug(f"记录 {i} 没有 entity_info，跳过")
                continue
                
            entity_info = data['entity_info']
            # A usable record must carry both 'text' and 'entities'.
            if isinstance(entity_info, dict) and 'text' in entity_info and 'entities' in entity_info:
                entities = entity_info['entities']
                if isinstance(entities, list):
                    valid_records += 1
                    for entity in entities:
                        if isinstance(entity, dict) and 'entityKey' in entity:
                            all_labels.add(entity['entityKey'])
                            logger.debug(f"记录 {i} 添加标签: {entity['entityKey']}")
                else:
                    logger.warning(f"记录 {i} 的 entities 不是列表类型: {type(entities)}")
            else:
                logger.warning(f"记录 {i} 的 entity_info 格式不正确: {type(entity_info)}")
        
        # Always include the non-entity tag.
        all_labels.add('O')
        
        # Sorting gives a deterministic label -> id assignment across runs.
        self.label_list = sorted(all_labels)
        self.label_to_id = {label: i for i, label in enumerate(self.label_list)}
        self.id_to_label = {i: label for label, i in self.label_to_id.items()}
        
        logger.info(f"标签列表: {self.label_list}")
        logger.info(f"标签到ID映射: {self.label_to_id}")
        logger.info(f"ID到标签映射: {self.id_to_label}")
        logger.info(f"有效记录数量: {valid_records}")
        logger.info(f"总标签数量: {len(self.label_list)}")
        
        # Pass 2: build one label sequence per usable record.
        processed_count = 0
        for i, data in enumerate(train_data):
            if not data.get('entity_info'):
                continue
                
            entity_info = data['entity_info']
            if isinstance(entity_info, dict) and 'text' in entity_info and 'entities' in entity_info:
                text = entity_info['text']
                entities = entity_info['entities']
                
                if isinstance(entities, list):
                    # One id per character, initialized to the 'O' id.
                    label_seq = [self.label_to_id['O']] * len(text)
                    entity_count = 0
                    for entity in entities:
                        if isinstance(entity, dict) and 'entityKey' in entity and 'entityValue' in entity:
                            ner_key = entity['entityKey']
                            ner_value = entity['entityValue']
                            
                            # The key should always be in the mapping (pass 1
                            # collected it), but guard anyway.
                            if ner_key not in self.label_to_id:
                                logger.warning(f"标签 {ner_key} 不在标签映射中，跳过")
                                continue
                                
                            label_id = self.label_to_id[ner_key]
                            
                            # Locate the first occurrence of the entity value
                            # (simple substring match).
                            start_pos = text.find(ner_value)
                            if start_pos != -1:
                                end_pos = start_pos + len(ner_value)
                                # Tag the covered characters; min() clamps the
                                # range end to the sequence length.
                                if 0 <= label_id < len(self.label_list):
                                    for j in range(start_pos, min(end_pos, len(label_seq))):
                                        label_seq[j] = label_id
                                entity_count += 1
                                logger.debug(f"在文本 '{text}' 中找到实体: {ner_key} = {ner_value} (位置 {start_pos}-{end_pos})")
                            else:
                                logger.warning(f"在文本 '{text}' 中未找到实体值 '{ner_value}'")
                    
                    # Keep only records in which at least one entity was found.
                    if entity_count > 0:
                        texts.append(text)
                        # Coerce every id to int and clamp it into range.
                        validated_label_seq = []
                        for label_id in label_seq:
                            if not isinstance(label_id, int):
                                try:
                                    label_id = int(label_id)
                                except (ValueError, TypeError):
                                    label_id = 0
                            
                            if label_id < 0:
                                validated_label_seq.append(0)
                            elif label_id >= len(self.label_list):
                                validated_label_seq.append(len(self.label_list) - 1)
                            else:
                                validated_label_seq.append(label_id)
                        
                        labels.append(validated_label_seq)
                        processed_count += 1
                        logger.debug(f"成功处理记录 {i}: '{text[:50]}...' 包含 {entity_count} 个实体")
                        if i == 0:  # detailed debug output for the first record only
                            logger.debug(f"记录 {i} 详细信息:")
                            logger.debug(f"  文本: {text}")
                            logger.debug(f"  标签序列长度: {len(validated_label_seq)}")
                            unique_labels = list(set(validated_label_seq))
                            logger.debug(f"  唯一标签: {unique_labels}")
        
        logger.info(f"总共处理了 {processed_count} 条有效训练数据")
        return texts, labels
    
    def fine_tune(self, train_data: List[Dict[str, Any]], 
                  output_dir: str = "./fine_tuned_model",
                  num_train_epochs: int = 10,
                  per_device_train_batch_size: int = 4,
                  save_steps: int = 50) -> str:
        """
        Fine-tune the BERT token-classification model.

        Prepares the data, loads the base model (local path first, hub as
        fallback), trains with the HuggingFace Trainer, then saves the
        model, tokenizer and label mapping into output_dir.

        Args:
            train_data: Training records (see prepare_data for the shape).
            output_dir: Directory the fine-tuned model is written to.
            num_train_epochs: Number of training epochs.
            per_device_train_batch_size: Batch size per device.
            save_steps: Checkpoint interval in optimizer steps.

        Returns:
            The output directory the model was saved to.

        Raises:
            ValueError: if no usable training records are found, or the
                dataset contains an invalid label id.
            RuntimeError: if the model fails to load.
        """
        logger.info("开始准备训练数据...")
        texts, labels = self.prepare_data(train_data)
        
        if not texts:
            logger.error("没有有效的训练数据")
            raise ValueError("没有有效的训练数据")
        
        logger.info(f"准备了 {len(texts)} 条训练数据")
        
        # Log a couple of samples for debugging.
        for i, (text, label_seq) in enumerate(zip(texts[:2], labels[:2])):
            logger.info(f"示例数据 {i+1}:")
            logger.info(f"  文本: {text}")
            logger.info(f"  标签序列长度: {len(label_seq)}")
            unique_labels = list(set([l for l in label_seq if l != -100]))
            logger.info(f"  唯一标签: {unique_labels}")
            logger.info(f"  前20个标签: {label_seq[:20]}")
        
        logger.info("创建训练数据集...")
        train_dataset = NERDataset(texts, labels, self.tokenizer, self.label_list)
        
        # Load the classification model with the right number of labels;
        # local files first, hub as fallback (mirrors the tokenizer logic).
        logger.info("创建模型实例...")
        try:
            self.model = AutoModelForTokenClassification.from_pretrained(
                self.model_path, 
                num_labels=len(self.label_list),
                local_files_only=True
            )
            logger.info(f"成功从本地路径加载模型，标签数量: {len(self.label_list)}")
        except Exception as e:
            logger.warning(f"从本地路径加载模型失败: {e}")
            try:
                self.model = AutoModelForTokenClassification.from_pretrained(
                    self.model_name,
                    num_labels=len(self.label_list)
                )
                logger.info(f"成功从在线加载模型，标签数量: {len(self.label_list)}")
            except Exception as e:
                logger.error(f"加载模型失败: {e}")
                raise
        
        # Defensive check that loading actually produced a model.
        if self.model is None:
            logger.error("模型加载失败，模型对象为None")
            raise RuntimeError("模型加载失败")
        
        logger.info(f"模型标签数量设置为: {self.model.config.num_labels}")
        
        # Device selection.
        use_cuda = torch.cuda.is_available()
        device_count = torch.cuda.device_count()
        logger.info(f"CUDA可用: {use_cuda}, 设备数量: {device_count}")
        
        if use_cuda:
            self.model = self.model.to('cuda')
            logger.info("模型已移动到GPU")
        else:
            logger.warning("CUDA不可用，将在CPU上进行训练")
        
        # Sanity-check the first few dataset items before handing off to
        # the Trainer; invalid label ids would crash mid-training.
        logger.info("再次验证数据集中的标签...")
        for i in range(min(3, len(train_dataset))):
            try:
                item = train_dataset[i]
                labels_tensor = item['labels']
                unique_labels = torch.unique(labels_tensor)
                logger.debug(f"数据集第 {i} 项唯一标签: {unique_labels.tolist()}")
                for label in unique_labels:
                    label_val = label.item() if hasattr(label, 'item') else label
                    if label_val != -100 and (label_val < 0 or label_val >= len(self.label_list)):
                        logger.error(f"数据集中发现无效标签: {label_val}，标签数量: {len(self.label_list)}")
                        logger.error(f"标签列表: {self.label_list}")
                        logger.error(f"标签到ID映射: {self.label_to_id}")
                        raise ValueError(f"数据集中发现无效标签: {label_val}")
            except Exception as e:
                logger.error(f"验证数据集第 {i} 项时出错: {e}")
                raise
        
        logger.info("创建数据整理器...")
        data_collator = DataCollatorForTokenClassification(tokenizer=self.tokenizer)
        
        logger.info("设置训练参数...")
        training_args = TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=num_train_epochs,
            per_device_train_batch_size=per_device_train_batch_size,
            save_steps=save_steps,
            logging_steps=10,
            save_total_limit=2,
            logging_dir=f'{output_dir}/logs',
            logging_strategy="steps",
            overwrite_output_dir=True,
            remove_unused_columns=False,
            # Mixed precision only when a GPU is available.
            fp16=use_cuda,
            use_cpu=not use_cuda,
            logging_first_step=True,
            report_to=[],
            # Conservative LR + weight decay for stability/generalization.
            learning_rate=2e-5,
            weight_decay=0.01,
            warmup_steps=50,
            # Effective batch size = per_device_train_batch_size * 2.
            gradient_accumulation_steps=2,
            dataloader_pin_memory=True,
            # load_best_model_at_end intentionally omitted: no eval
            # strategy is configured, so it would raise a mismatch error.
        )
        
        logger.info("创建训练器...")
        # NOTE(review): `tokenizer=` is deprecated in newer transformers
        # releases in favour of `processing_class=` — confirm the pinned
        # transformers version before changing it.
        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=train_dataset,
            data_collator=data_collator,
            tokenizer=self.tokenizer,
        )
        
        # Log the final configuration before training starts.
        logger.info("训练器配置信息:")
        logger.info(f"  模型标签数: {self.model.config.num_labels}")
        logger.info(f"  训练样本数: {len(train_dataset)}")
        logger.info(f"  批次大小: {per_device_train_batch_size}")
        logger.info(f"  梯度累积步数: {training_args.gradient_accumulation_steps}")
        logger.info(f"  使用CUDA: {use_cuda}")
        if use_cuda:
            logger.info(f"  CUDA设备数量: {device_count}")
            for i in range(device_count):
                logger.info(f"    设备 {i}: {torch.cuda.get_device_name(i)}")
        
        logger.info("训练前最终检查:")
        logger.info(f"  模型对象是否存在: {self.model is not None}")
        if self.model is not None:
            logger.info(f"  模型配置中的标签数: {self.model.config.num_labels}")
            logger.info(f"  模型设备: {next(self.model.parameters()).device}")
        logger.info(f"  标签列表: {self.label_list}")
        logger.info(f"  标签数量: {len(self.label_list)}")
        logger.info(f"  标签到ID映射: {self.label_to_id}")
        logger.info(f"  ID到标签映射: {self.id_to_label}")
        
        logger.info("开始微调模型...")
        logger.info(f"训练参数: epochs={num_train_epochs}, batch_size={per_device_train_batch_size}")
        trainer.train()
        
        # Save the model and tokenizer.
        logger.info("保存模型...")
        try:
            trainer.save_model(output_dir)
            self.tokenizer.save_pretrained(output_dir)
            logger.info("模型保存完成")
        except Exception as e:
            logger.error(f"模型保存失败: {e}")
            raise
        
        # Persist the label mapping next to the model so inference can map
        # output ids back to label names.
        logger.info("保存标签映射...")
        try:
            label_mapping = {
                "label_list": self.label_list,
                "label_to_id": self.label_to_id,
                "id_to_label": self.id_to_label
            }
            
            label_mapping_path = os.path.join(output_dir, "label_mapping.json")
            with open(label_mapping_path, "w", encoding="utf-8") as f:
                json.dump(label_mapping, f, ensure_ascii=False, indent=2)
            logger.info("标签映射保存完成")
            logger.info(f"标签映射文件路径: {label_mapping_path}")
            
            # Read the file back to verify it was written correctly.
            with open(label_mapping_path, "r", encoding="utf-8") as f:
                saved_label_mapping = json.load(f)
                logger.info(f"保存的标签映射内容: {saved_label_mapping}")
        except Exception as e:
            logger.error(f"标签映射保存失败: {e}")
            raise
        
        # Verify the saved files. Modern transformers saves weights as
        # model.safetensors rather than pytorch_model.bin, so either weight
        # file counts as present before falling back to a manual save.
        logger.info("验证保存的文件...")
        for file in ("config.json", "label_mapping.json"):
            file_path = os.path.join(output_dir, file)
            if os.path.exists(file_path):
                logger.info(f"  ✓ {file} 已保存")
            else:
                logger.error(f"  ✗ {file} 未找到")
        weights_saved = any(
            os.path.exists(os.path.join(output_dir, name))
            for name in ("pytorch_model.bin", "model.safetensors")
        )
        if weights_saved:
            logger.info("  ✓ 模型权重文件 已保存")
        else:
            logger.error("  ✗ 模型权重文件 未找到")
            # Fall back to saving the raw state dict manually.
            if self.model is not None:
                try:
                    logger.info("尝试手动保存模型权重...")
                    model_path = os.path.join(output_dir, "pytorch_model.bin")
                    torch.save(self.model.state_dict(), model_path)
                    logger.info("手动保存模型权重完成")
                except Exception as e:
                    logger.error(f"手动保存模型权重失败: {e}")
                    raise
        
        logger.info(f"模型微调完成，保存至: {output_dir}")
        return output_dir
    
    def _set_label_mapping(self, label_mapping: Dict[str, Any]) -> None:
        """
        Install a label mapping that was round-tripped through JSON.

        JSON object keys are always strings, so id_to_label comes back
        keyed by "0", "1", ... — convert those keys to int so lookups by
        model output id work (the rest of the class uses int ids).
        """
        self.label_list = label_mapping["label_list"]
        self.label_to_id = label_mapping["label_to_id"]
        self.id_to_label = {int(k): v for k, v in label_mapping["id_to_label"].items()}
    
    def load_fine_tuned_model(self, model_path: str):
        """
        Load a previously fine-tuned model, tokenizer and label mapping.

        Falls back to synthetic LABEL_i names when label_mapping.json is
        missing, then normalizes the mappings for consistency.

        Args:
            model_path: Directory produced by fine_tune().

        Raises:
            Exception: re-raised if the model/tokenizer cannot be loaded.
        """
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            self.model = AutoModelForTokenClassification.from_pretrained(model_path)
            
            # Move to GPU when available.
            if torch.cuda.is_available():
                self.model = self.model.to('cuda')
                logger.info("微调模型已移动到GPU")
            
            # Load the label mapping saved alongside the model.
            label_mapping_path = os.path.join(model_path, "label_mapping.json")
            if os.path.exists(label_mapping_path):
                with open(label_mapping_path, "r", encoding="utf-8") as f:
                    label_mapping = json.load(f)
                # Fix JSON's string-only keys before installing the mapping.
                self._set_label_mapping(label_mapping)
                logger.info("成功从label_mapping.json加载标签映射")
                logger.info(f"标签映射内容: {label_mapping}")
            else:
                logger.warning(f"标签映射文件不存在: {label_mapping_path}")
                # Fall back to synthetic names derived from the model config.
                if hasattr(self.model.config, 'num_labels'):
                    self.label_list = [f"LABEL_{i}" for i in range(self.model.config.num_labels)]
                    self.label_to_id = {label: i for i, label in enumerate(self.label_list)}
                    self.id_to_label = {i: label for label, i in self.label_to_id.items()}
                    logger.info("从模型配置创建默认标签映射")
            
            # Fill any gaps so both mappings cover the whole label list.
            self._ensure_label_mapping_consistency()
            
            logger.info(f"成功加载微调模型: {model_path}")
            logger.info(f"标签列表: {self.label_list}")
            logger.info(f"标签到ID映射: {self.label_to_id}")
            logger.info(f"ID到标签映射: {self.id_to_label}")
            logger.info(f"标签数量: {len(self.label_list)}")
        except Exception as e:
            logger.error(f"加载微调模型失败: {e}")
            raise
    
    def _ensure_label_mapping_consistency(self):
        """
        Fill gaps so label_to_id and id_to_label each cover every entry of
        label_list. Existing entries are never overwritten.
        """
        # Complete the id -> label direction.
        for i, label in enumerate(self.label_list):
            if i not in self.id_to_label:
                self.id_to_label[i] = label
        
        # Complete the label -> id direction.
        for i, label in enumerate(self.label_list):
            if label not in self.label_to_id:
                self.label_to_id[label] = i

# Manual smoke check — a real training-data payload is required for an
# actual fine-tuning run, so importing this module has no side effects
# beyond logger setup.
if __name__ == "__main__":
    for message in (
        "BERTFineTuner模块已创建",
        "请使用实际的训练数据来微调模型",
    ):
        print(message)