import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from torch.optim import AdamW
from model import BertSpamClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tqdm import tqdm

# Configuration: hyperparameters and runtime settings for fine-tuning.
PRETRAINED_MODEL = 'bert-base-uncased'  # Hugging Face checkpoint to fine-tune
BATCH_SIZE = 16
EPOCHS = 2
LR = 2e-5  # learning rate in the typical range for BERT fine-tuning
MAX_LEN = 128  # max token length per example; longer inputs are truncated
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # prefer GPU when available

# 1. Dataset definition
class SpamDataset(Dataset):
    """Dataset of SMS messages paired with binary spam labels.

    Each example is the raw message concatenated with its chain-of-thought
    rationale (the 'cot' column), joined by a literal ' [SEP] ' marker so the
    tokenizer sees both segments as one sequence.
    """

    # String labels from the CSV mapped onto class indices.
    _LABEL_IDS = {'ham': 0, 'spam': 1}

    def __init__(self, df, tokenizer, max_len):
        combined = df['text'] + ' [SEP] ' + df['cot']
        self.texts = combined.tolist()
        self.labels = df['label'].map(self._LABEL_IDS).tolist()
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Tokenize lazily, one example at a time; pad/truncate to max_len.
        enc = self.tokenizer(
            self.texts[idx],
            truncation=True,
            padding='max_length',
            max_length=self.max_len,
            return_tensors='pt',
        )
        # squeeze(0) drops the batch dim the tokenizer adds with
        # return_tensors='pt'; DataLoader re-batches later.
        return {
            'input_ids': enc['input_ids'].squeeze(0),
            'attention_mask': enc['attention_mask'].squeeze(0),
            'label': torch.tensor(self.labels[idx], dtype=torch.long),
        }

# 2. Load the data and build train/validation DataLoaders.
print('加载数据...')
df = pd.read_csv('data/sms_spam_cot.csv')
# Hold out 10% for validation; stratify so both splits keep the ham/spam ratio.
train_df, val_df = train_test_split(
    df, test_size=0.1, random_state=42, stratify=df['label']
)
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL)
train_dataset = SpamDataset(train_df, tokenizer, MAX_LEN)
val_dataset = SpamDataset(val_df, tokenizer, MAX_LEN)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

# 3. Model, optimizer and loss.
model = BertSpamClassifier(PRETRAINED_MODEL).to(DEVICE)
optimizer = AdamW(model.parameters(), lr=LR)
loss_fn = torch.nn.CrossEntropyLoss()

# 4. Training loop: one optimization pass per epoch, then a validation pass.
for epoch in range(EPOCHS):
    model.train()
    total_loss = 0.0
    for batch in tqdm(train_loader, desc=f'Epoch {epoch+1}/{EPOCHS} - Training'):
        ids = batch['input_ids'].to(DEVICE)
        mask = batch['attention_mask'].to(DEVICE)
        target = batch['label'].to(DEVICE)

        optimizer.zero_grad()
        loss = loss_fn(model(ids, mask), target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f'Epoch {epoch+1} Loss: {total_loss/len(train_loader):.4f}')

    # Evaluate on the held-out split; no gradients needed here.
    model.eval()
    y_pred, y_true = [], []
    with torch.no_grad():
        for batch in tqdm(val_loader, desc='Validating'):
            ids = batch['input_ids'].to(DEVICE)
            mask = batch['attention_mask'].to(DEVICE)
            logits = model(ids, mask)
            y_pred.extend(torch.argmax(logits, dim=1).cpu().numpy())
            y_true.extend(batch['label'].numpy())
    acc = accuracy_score(y_true, y_pred)
    print(f'Validation Accuracy: {acc:.4f}')

# 5. Persist only the fine-tuned weights (state_dict, not the full module).
torch.save(model.state_dict(), 'spam_bert_cot.pth')
print('模型已保存为 spam_bert_cot.pth')