# 环境准备
import json
import os

import numpy as np
import pandas as pd
import torch
from sklearn.metrics import accuracy_score, f1_score
from torch.cuda.amp import GradScaler, autocast
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
from transformers import AdamW, BertForSequenceClassification, BertTokenizer

# Device selection: prefer the first GPU when one is available.
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
print(f"Using device: {device}")

# 数据预处理
class OffensiveDataset(Dataset):
    """Torch dataset that tokenizes (text, label) pairs on demand.

    Encoded items are memoized in ``self.cache`` so repeated epochs do not
    re-run the tokenizer on the same index.
    """

    def __init__(self, texts, labels, tokenizer, max_len=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.cache = {}  # idx -> encoded item, filled lazily

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        cached = self.cache.get(idx)
        if cached is not None:
            return cached

        # Tokenize with padding/truncation to a fixed max_len so batches stack.
        encoded = self.tokenizer.encode_plus(
            str(self.texts[idx]),
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        item = {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'label': torch.tensor(self.labels[idx], dtype=torch.long),
        }
        self.cache[idx] = item
        return item

# 加载数据（修改部分）
def load_json_data(file_path):
    """Read a JSON array of records into a two-column DataFrame.

    Each record must carry a 'content' (text) and a 'toxic' (label) field;
    they are returned as columns 'TEXT' and 'label' respectively.
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        records = json.load(fh)

    texts, labels = [], []
    for record in records:
        texts.append(record['content'])
        labels.append(record['toxic'])

    return pd.DataFrame({'TEXT': texts, 'label': labels})

# Load train/test splits from JSON files (paths are placeholders).
train_df = load_json_data('train.json')  # TODO: replace with actual path
test_df = load_json_data('test.json')    # TODO: replace with actual path

# Inspect the label distribution (relevant for the class-balanced sampler below).
print("Class distribution in training set:")
print(train_df.label.value_counts())

# Chinese BERT backbone; the tokenizer must match the model checkpoint.
MODEL_NAME = 'bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)

# 创建带类别平衡的数据加载器（修改部分）
def create_data_loader(df, tokenizer, max_len, batch_size, sampler=None):
    """Build a DataLoader over a DataFrame with 'TEXT' and 'label' columns.

    An optional sampler (e.g. WeightedRandomSampler) controls example
    selection; when omitted, batches are drawn sequentially.
    """
    dataset = OffensiveDataset(
        texts=df.TEXT.to_numpy(),
        labels=df.label.to_numpy(),
        tokenizer=tokenizer,
        max_len=max_len,
    )
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=4,   # parallel tokenization in worker processes
        pin_memory=True,  # faster host-to-GPU transfers
    )
    return loader

# Inverse-frequency class weights for a balanced sampler.
# NOTE: value_counts() sorts by FREQUENCY, not by label value, so indexing a
# plain list by the label (the original code) silently misassigns weights
# whenever label 1 is the majority class. Keying by label fixes that, and
# len(label_counts) generalizes the hard-coded 2-class constant.
label_counts = train_df.label.value_counts().to_dict()
num_samples = len(train_df)
num_classes = len(label_counts)
class_weights = {
    label: num_samples / (num_classes * count)
    for label, count in label_counts.items()
}
# Per-example sampling weight = weight of that example's class.
weights = [class_weights[label] for label in train_df.label]
sampler = WeightedRandomSampler(weights, num_samples=len(train_df), replacement=True)

BATCH_SIZE = 16
MAX_LEN = 128

# Balanced sampling for training only; test set is evaluated as-is.
train_loader = create_data_loader(train_df, tokenizer, MAX_LEN, BATCH_SIZE, sampler=sampler)
test_loader = create_data_loader(test_df, tokenizer, MAX_LEN, BATCH_SIZE)

# Model definition: binary sequence classifier on top of Chinese BERT.
model = BertForSequenceClassification.from_pretrained(
    MODEL_NAME,
    num_labels=2
).to(device)
# Let cuDNN auto-tune kernels; beneficial because input shapes are fixed
# (all sequences are padded to MAX_LEN).
torch.backends.cudnn.benchmark = True

# Training configuration
EPOCHS = 5
# Use torch.optim.AdamW: transformers.AdamW is deprecated and removed in
# recent transformers releases (it also skipped Adam bias correction).
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5, weight_decay=0.01)
# AMP gradient scaler; disabled on CPU-only hosts to avoid a useless warning.
scaler = GradScaler(enabled=torch.cuda.is_available())
best_f1 = 0

# Training function
def train_epoch(model, data_loader, optimizer, device, amp_scaler=None):
    """Run one mixed-precision training epoch.

    Args:
        model: classifier whose forward returns an object with .loss/.logits
               (HuggingFace-style) when given input_ids/attention_mask/labels.
        data_loader: yields dicts with 'input_ids', 'attention_mask', 'label'.
        optimizer: optimizer over model.parameters().
        device: target torch device.
        amp_scaler: optional GradScaler; defaults to the module-level
            ``scaler`` so existing 4-argument call sites keep working while
            tests can inject their own scaler.

    Returns:
        (accuracy over the whole dataset as a tensor, mean batch loss).
    """
    if amp_scaler is None:
        amp_scaler = scaler  # fall back to the module-level scaler

    model.train()
    losses = []
    correct_predictions = 0

    for batch in data_loader:
        input_ids = batch['input_ids'].to(device, non_blocking=True)
        attention_mask = batch['attention_mask'].to(device, non_blocking=True)
        labels = batch['label'].to(device, non_blocking=True)

        optimizer.zero_grad()

        # Forward pass under autocast for mixed precision on CUDA.
        with autocast():
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels
            )

        loss = outputs.loss
        logits = outputs.logits

        # Scaled backward + step; the scaler skips the step on inf/NaN grads.
        amp_scaler.scale(loss).backward()
        amp_scaler.step(optimizer)
        amp_scaler.update()

        _, preds = torch.max(logits, dim=1)
        correct_predictions += torch.sum(preds == labels)
        losses.append(loss.item())

    return correct_predictions.double() / len(data_loader.dataset), np.mean(losses)

# Evaluation function
def eval_model(model, data_loader, device):
    """Evaluate on a labelled loader.

    Returns:
        (accuracy, mean batch loss, binary F1) over the whole loader.
    """
    model.eval()
    batch_losses = []
    predictions = []
    targets = []

    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch['input_ids'].to(device, non_blocking=True)
            attention_mask = batch['attention_mask'].to(device, non_blocking=True)
            labels = batch['label'].to(device, non_blocking=True)

            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels
            )

            batch_losses.append(outputs.loss.item())
            predictions.extend(outputs.logits.argmax(dim=1).cpu().numpy())
            targets.extend(labels.cpu().numpy())

    return (
        accuracy_score(targets, predictions),
        np.mean(batch_losses),
        f1_score(targets, predictions),
    )

# Training loop: track the best test-set F1 and checkpoint on improvement.
os.makedirs('./model', exist_ok=True)  # torch.save does not create directories

for epoch in range(EPOCHS):
    print(f'Epoch {epoch + 1}/{EPOCHS}')
    print('-' * 10)

    train_acc, train_loss = train_epoch(model, train_loader, optimizer, device)
    test_acc, test_loss, test_f1 = eval_model(model, test_loader, device)

    print(f'Train Loss: {train_loss:.4f} | Acc: {train_acc:.4f}')
    print(f'Test  Loss: {test_loss:.4f} | Acc: {test_acc:.4f} | F1: {test_f1:.4f}')

    if test_f1 > best_f1:
        torch.save(model.state_dict(), './model/best_model.bin')
        best_f1 = test_f1
        print(f'New best model saved with F1: {best_f1:.4f}')
    else:
        print(f"No improvement, best F1 remains: {best_f1:.4f}")

# Reload the best checkpoint for downstream inference; map_location makes the
# load robust even if the checkpoint was produced on a different device.
model.load_state_dict(torch.load('./model/best_model.bin', map_location=device))
model = model.to(device)

# Prediction function
def predict(text, model, tokenizer, device, max_len=128):
    """Classify a single text as offensive (label 1) or normal (label 0).

    Args:
        text: raw input string.
        model: classifier whose forward returns an object with .logits.
        tokenizer: HuggingFace-style tokenizer providing encode_plus().
        device: torch device to run inference on.
        max_len: pad/truncate length; must match training (default 128).

    Returns:
        Human-readable label string.
    """
    # Ensure deterministic inference: the original omitted this, so dropout
    # stayed active if the model was last left in train mode.
    model.eval()

    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=max_len,
        return_token_type_ids=False,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        return_tensors='pt',
    )

    input_ids = encoding['input_ids'].to(device)
    attention_mask = encoding['attention_mask'].to(device)

    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)

    logits = outputs.logits
    _, prediction = torch.max(logits, dim=1)

    return '攻击性言论' if prediction.item() == 1 else '正常言论'

# Smoke-test examples (Chinese texts; the second is an offensive sample).
test_texts = [
    "我们小区也有个金发碧眼的小萝莉 家长们都挺喜欢的",
    "如果政策上恢复黑人昆仑奴的一切待遇,我想这黑人会被人间蒸发"
]

# Print the predicted label for each sample using the best checkpoint.
for text in test_texts:
    print(f'"{text}" => {predict(text, model, tokenizer, device)}')