import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, get_scheduler
from torch.optim import AdamW
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from bert_classifier import BERTClassifier
from utils import load_jsonl, saveJsonl


class EssayDataset(Dataset):
    """Dataset of essay records ({'text', 'generated', 'id'}) tokenized for BERT.

    Each item is tokenized on access (padded/truncated to ``max_length``) and
    returned as a dict of ``input_ids``, ``attention_mask``, ``label`` and ``id``.
    """

    def __init__(self, data, tokenizer, max_length=512):
        self.data = data
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        record = self.data[idx]
        encoded = self.tokenizer(
            record['text'],
            padding='max_length',
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt',
        )
        return {
            'input_ids': encoded['input_ids'][0],
            'attention_mask': encoded['attention_mask'][0],
            'label': torch.tensor(int(record.get('generated', 0))),
            'id': record.get('id', ''),  # keep the id so predictions can be joined back
        }


def train_model(model, train_loader, optimizer, scheduler, device):
    """Run one training epoch over ``train_loader``, updating ``model`` in place.

    Uses cross-entropy loss; steps both the optimizer and the LR scheduler
    after every batch. Returns nothing — progress is shown via tqdm.
    """
    model.train()
    criterion = torch.nn.CrossEntropyLoss()
    for batch in tqdm(train_loader, desc="Training"):
        ids = batch['input_ids'].to(device)
        mask = batch['attention_mask'].to(device)
        targets = batch['label'].to(device)

        logits = model(ids, mask)
        batch_loss = criterion(logits, targets)

        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        scheduler.step()  # per-batch step: the schedule is sized in training steps


def evaluate_model(model, loader, device):
    """Score every item in ``loader`` and return [{'id': ..., 'generated': prob}].

    ``generated`` is the softmax probability of class 1 (presumably the
    "AI-generated" class — matches the positive class used in calc_roc_auc).
    """
    model.eval()
    results = []
    with torch.no_grad():
        for batch in tqdm(loader, desc="Evaluating"):
            logits = model(batch['input_ids'].to(device),
                           batch['attention_mask'].to(device))
            scores = torch.softmax(logits, dim=1)[:, 1].cpu().numpy()
            # Pair each score with its id directly (ids come through the batch)
            for item_id, score in zip(batch['id'], scores):
                results.append({'id': item_id, 'generated': float(score)})
    return results


def calc_roc_auc(model, loader, device):
    """Return the ROC-AUC of P(class 1) against the labels in ``loader``."""
    model.eval()
    y_true, y_score = [], []
    with torch.no_grad():
        for batch in loader:
            logits = model(batch['input_ids'].to(device),
                           batch['attention_mask'].to(device))
            y_score.extend(torch.softmax(logits, dim=1)[:, 1].cpu().numpy())
            y_true.extend(batch['label'].cpu().numpy())
    return roc_auc_score(y_true, y_score)


def main():
    """Fine-tune BERTClassifier on train.jsonl and write test-set predictions.

    Loads a local tokenizer/model, trains for ``num_epochs`` epochs with a
    linear LR schedule, reports train ROC-AUC per epoch, and saves the
    test-set probabilities to ./test_pred.jsonl.
    """
    # Single source of truth for the epoch count. Previously the scheduler
    # was sized for len(train_loader) * 3 steps while the loop ran 5 epochs,
    # so the linear schedule decayed the LR to 0 and epochs 4-5 trained at
    # lr == 0 (no effective updates).
    num_epochs = 5

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = BertTokenizer.from_pretrained("./model")

    train_data = load_jsonl('./data/train.jsonl')
    test_data = load_jsonl('./data/test.jsonl')

    train_dataset = EssayDataset(train_data, tokenizer)
    test_dataset = EssayDataset(test_data, tokenizer)

    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

    model = BERTClassifier().to(device)
    optimizer = AdamW(model.parameters(), lr=2e-5)
    scheduler = get_scheduler("linear", optimizer=optimizer,
                              num_warmup_steps=0,
                              num_training_steps=len(train_loader) * num_epochs)

    for epoch in range(num_epochs):
        print(f"\nEpoch {epoch + 1}/{num_epochs}")
        train_model(model, train_loader, optimizer, scheduler, device)
        # NOTE(review): AUC is measured on the training set only — no held-out
        # validation split is visible here; confirm this is intentional.
        auc = calc_roc_auc(model, train_loader, device)
        print(f"Train ROC-AUC: {auc:.4f}")

    predictions = evaluate_model(model, test_loader, device)
    saveJsonl('./test_pred.jsonl', predictions)


# Run the full training/prediction pipeline only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
