import json
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizerFast, BertForTokenClassification
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import os

from tqdm import tqdm

def set_seed(seed):
    """Seed all relevant RNGs so runs are reproducible.

    Args:
        seed: integer seed applied to Python's `random`, NumPy and PyTorch.
    """
    import random  # stdlib RNG; seeded too so any library using it is deterministic
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Safe no-op when CUDA is unavailable.
    torch.cuda.manual_seed_all(seed)

# Seed once at import time so the whole training run is reproducible.
set_seed(42)

# Hyper-parameters and runtime settings for target-extraction fine-tuning.
class Config:
    """Central configuration shared by the whole training script."""

    model_name = "bert-base-chinese"  # pretrained Chinese BERT checkpoint
    max_length = 256                  # maximum token sequence length
    batch_size = 16
    epochs = 5
    learning_rate = 2e-5
    weight_decay = 0.01
    warmup_steps = 0
    device = "cuda" if torch.cuda.is_available() else "cpu"
    output_dir = "models"             # where checkpoints are written

# Single shared configuration instance used throughout the module.
config = Config()

def load_processed_data(file_path):
    """Deserialize and return the JSON payload stored at *file_path* (UTF-8)."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        payload = json.load(handle)
    return payload

# Dataset producing aligned token-classification tensors.
class TargetExtractionDataset(Dataset):
    """Token-classification dataset for BIO target extraction.

    Each element of *data* is a dict with:
        'sentence': the raw input string
        'labels':   one BIO tag ('O' / 'B-TARGET' / 'I-TARGET') per original
                    unit (presumably one per character for Chinese text —
                    confirm against the preprocessing step)
    """

    def __init__(self, data, tokenizer, max_length):
        self.data = data
        self.tokenizer = tokenizer
        self.max_length = max_length
        # BIO tag -> integer class id expected by the token-classification head.
        self.label_map = {'O': 0, 'B-TARGET': 1, 'I-TARGET': 2}

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return one example as {'input_ids', 'attention_mask', 'labels'}.

        Label positions for special tokens, padding, and any word index
        beyond the provided label sequence are set to -100, the value
        HuggingFace losses ignore.
        """
        item = self.data[idx]
        sentence = item['sentence']
        labels = [self.label_map[label] for label in item['labels']]

        # Encode with the BERT tokenizer; fixed-length padded output.
        encoding = self.tokenizer(
            sentence,
            truncation=True,
            max_length=self.max_length,
            padding='max_length',
            return_tensors='pt'
        )

        # Drop the batch dimension added by return_tensors='pt'.
        input_ids = encoding['input_ids'].squeeze(0)
        attention_mask = encoding['attention_mask'].squeeze(0)

        # Align word-level BIO labels with subword tokens.
        word_ids = encoding.word_ids()
        previous_word_id = None
        label_ids = []

        for word_id in word_ids:
            if word_id is None:
                # Special tokens ([CLS], [SEP]) and padding: ignored by the loss.
                label_ids.append(-100)
            elif word_id >= len(labels):
                # BUGFIX: tokens mapping past the end of the label sequence
                # previously raised IndexError in the continuation-subtoken
                # branch; ignore them uniformly instead.
                label_ids.append(-100)
            elif word_id != previous_word_id:
                # First sub-token of a word carries that word's label.
                label_ids.append(labels[word_id])
            else:
                # Continuation sub-tokens: B-TARGET becomes I-TARGET,
                # every other tag is kept as-is.
                if labels[word_id] == self.label_map['B-TARGET']:
                    label_ids.append(self.label_map['I-TARGET'])
                else:
                    label_ids.append(labels[word_id])
            previous_word_id = word_id

        # Pad / truncate labels to exactly max_length.
        label_ids = label_ids[:self.max_length]
        label_ids += [-100] * (self.max_length - len(label_ids))

        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': torch.tensor(label_ids)
        }

def load_model_and_tokenizer(num_labels):
    """Instantiate the pretrained tokenizer and token-classification model.

    Args:
        num_labels: size of the classification head (here 3: O / B / I).

    Returns:
        (model, tokenizer) tuple, both loaded from config.model_name.
    """
    pretrained = config.model_name
    tokenizer = BertTokenizerFast.from_pretrained(pretrained)
    classifier = BertForTokenClassification.from_pretrained(
        pretrained,
        num_labels=num_labels,
    )
    return classifier, tokenizer

def evaluate(model, dataloader, device):
    """Evaluate *model* on *dataloader* and return token-level metrics.

    Positions labeled -100 (special tokens / padding) are excluded.

    Returns:
        dict with 'accuracy', 'precision', 'recall', 'f1'; precision,
        recall and F1 are weighted averages over the label classes.
    """
    model.eval()
    predictions = []
    true_labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            pred = torch.argmax(outputs.logits, dim=2)

            # Vectorized selection of the non-ignored positions (replaces a
            # per-element Python double loop over the whole batch).
            mask = labels != -100
            true_labels.extend(labels[mask].cpu().tolist())
            predictions.extend(pred[mask].cpu().tolist())

    # Aggregate token-level metrics over the full dataset.
    accuracy = accuracy_score(true_labels, predictions)
    precision, recall, f1, _ = precision_recall_fscore_support(
        true_labels, predictions, average='weighted'
    )

    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1
    }

def train():
    """Fine-tune BERT for BIO target extraction and keep the best checkpoint.

    Loads preprocessed train/val JSON, trains for config.epochs epochs and
    saves the model with the highest validation F1 under config.output_dir.
    """
    # Load preprocessed data.
    train_data = load_processed_data('data/train_processed.json')
    val_data = load_processed_data('data/val_processed.json')

    # Model / tokenizer: 3 labels = O, B-TARGET, I-TARGET.
    model, tokenizer = load_model_and_tokenizer(num_labels=3)
    model.to(config.device)

    # Datasets and loaders.
    train_dataset = TargetExtractionDataset(train_data, tokenizer, config.max_length)
    val_dataset = TargetExtractionDataset(val_data, tokenizer, config.max_length)
    train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    val_dataloader = DataLoader(val_dataset, batch_size=config.batch_size)

    # Optimizer and linear-warmup schedule over the whole run.
    optimizer = AdamW(model.parameters(), lr=config.learning_rate, weight_decay=config.weight_decay)
    total_steps = len(train_dataloader) * config.epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=config.warmup_steps,
        num_training_steps=total_steps
    )

    best_f1 = 0
    for epoch in range(config.epochs):
        avg_train_loss = _train_one_epoch(model, train_dataloader, optimizer, scheduler, epoch)

        # Validate after each epoch.
        val_metrics = evaluate(model, val_dataloader, config.device)

        print(f"Epoch {epoch+1}/{config.epochs}")
        print(f"训练损失: {avg_train_loss:.4f}")
        print(f"验证准确率: {val_metrics['accuracy']:.4f}")
        print(f"验证精确率: {val_metrics['precision']:.4f}")
        print(f"验证召回率: {val_metrics['recall']:.4f}")
        print(f"验证F1分数: {val_metrics['f1']:.4f}")

        # Keep only the checkpoint with the best validation F1.
        if val_metrics['f1'] > best_f1:
            best_f1 = val_metrics['f1']
            _save_checkpoint(model, tokenizer)
            print(f"最佳模型已保存，F1: {best_f1:.4f}")


def _train_one_epoch(model, dataloader, optimizer, scheduler, epoch):
    """Run one optimization pass over *dataloader*; return the mean batch loss."""
    model.train()
    total_loss = 0

    progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}/{config.epochs}")
    for batch in progress_bar:
        input_ids = batch['input_ids'].to(config.device)
        attention_mask = batch['attention_mask'].to(config.device)
        labels = batch['labels'].to(config.device)

        optimizer.zero_grad()
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels
        )
        loss = outputs.loss
        total_loss += loss.item()

        loss.backward()
        # Clip gradients to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()

        progress_bar.set_postfix({'loss': f"{loss.item():.4f}"})

    return total_loss / len(dataloader)


def _save_checkpoint(model, tokenizer):
    """Persist the model and tokenizer under config.output_dir."""
    os.makedirs(config.output_dir, exist_ok=True)
    model.save_pretrained(os.path.join(config.output_dir, "best_model"))
    tokenizer.save_pretrained(os.path.join(config.output_dir, "tokenizer"))

# Entry point: run the full fine-tuning pipeline when executed as a script.
if __name__ == "__main__":
    train()