import sys, os, time
import random, numpy as np
import torch, torch.nn as nn
import shutil, logging, json
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertConfig
from transformers import BertForSequenceClassification, AdamW

from dataloader import QNLIDataset


def evaluate_model(model, test_dataloader, device):
    """Evaluate ``model`` on ``test_dataloader`` and return the accuracy.

    Args:
        model: sequence-classification model whose forward pass returns an
            object with a ``logits`` attribute (e.g. a HuggingFace
            ``BertForSequenceClassification``).
        test_dataloader: iterable of batches, each a dict with
            ``input_ids``, ``attention_mask`` and ``labels`` tensors.
        device: ``torch.device`` the batch tensors are moved to.

    Returns:
        float: fraction of correctly classified examples; ``0.0`` for an
        empty dataloader (instead of raising ``ZeroDivisionError``).

    Note:
        Leaves the model in eval mode; a caller that continues training
        must switch back with ``model.train()``.
    """
    # Optional progress bar: fall back to plain iteration when the
    # third-party tqdm package is unavailable.
    try:
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable, **kwargs):
            return iterable

    model.eval()
    all_correct = 0
    all_total = 0

    with torch.no_grad():
        for batch in tqdm(test_dataloader, desc="Evaluating"):
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            labels = batch["labels"].to(device)

            # Labels are intentionally NOT passed to the forward call:
            # the loss is never used here, so we skip computing it.
            outputs = model(input_ids, attention_mask=attention_mask)
            logits = outputs.logits

            # Accumulate accuracy statistics.
            preds = torch.argmax(logits, dim=1)
            all_correct += (preds == labels).sum().item()
            all_total += labels.size(0)

    # Guard against an empty dataloader.
    accuracy = all_correct / all_total if all_total else 0.0
    print(f"Accuracy: {accuracy}")
    return accuracy


def train_model(model, train_dataloader, test_dataloader, device, epochs=3):
    """Fine-tune ``model`` on ``train_dataloader``.

    Args:
        model: sequence-classification model whose forward pass returns an
            object with ``loss`` and ``logits`` attributes when ``labels``
            are supplied.
        train_dataloader: iterable of training batches (dicts with
            ``input_ids``, ``attention_mask``, ``labels``).
        test_dataloader: evaluation batches, passed to ``evaluate_model``
            every 100 steps.
        device: ``torch.device`` the batch tensors are moved to.
        epochs: number of passes over the training data (default 3).

    Returns:
        None. The model is updated in place.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
    model.train()
    step = 0  # global step counter; renamed from 'iter' (shadowed the builtin)
    for epoch in range(epochs):
        total_loss = 0.0
        num_batches = 0
        for batch in train_dataloader:
            step += 1
            num_batches += 1
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            labels = batch["labels"].to(device)

            optimizer.zero_grad()

            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss

            total_loss += loss.item()

            loss.backward()
            optimizer.step()

            print("Epoch: {}, iter: {}, loss : {:.4f}".format(
                epoch, step, loss.item()
            ))

            if step % 100 == 0:
                evaluate_model(model, test_dataloader, device)
                print(f"Iteration {step}, Loss: {loss.item()}")
                # Bug fix: evaluate_model switches the model to eval mode;
                # restore train mode so dropout stays active for the
                # remaining training steps.
                model.train()

        # Report the average loss once per epoch (the original had this
        # commented out inside the batch loop, where it was misplaced).
        if num_batches:
            print(f"Epoch {epoch} average loss: {total_loss / num_batches:.4f}")

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = BertForSequenceClassification.from_pretrained("./bert_en", num_labels=2).to(device)
# optimizer = AdamW(model.parameters(), lr=2e-5)


# tokenizer = BertTokenizer.from_pretrained("./bert_en")
# train_dataset = QNLIDataset("./glue_data/QNLI/train.tsv", tokenizer, num_samples=200)
# test_dataset = QNLIDataset("./glue_data/QNLI/dev.tsv", tokenizer, num_samples=200)
# # print(len(train_dataset))
# train_dataloader = DataLoader(train_dataset, batch_size=20, shuffle=True)
# test_dataloader = DataLoader(test_dataset, batch_size=20, shuffle=False)
# train_model(model, train_dataloader, test_dataloader, device, epochs=3)
# evaluate_model(model, test_dataloader, device)