from torch.utils.data import IterableDataset
import json

# Training hyperparameters.
learning_rate = 1e-5  # AdamW step size
epoch_num = 3         # number of full passes over the training set
batch_size = 4        # samples per optimizer step

class IterableAFQMC(IterableDataset):
    """Stream AFQMC samples from a JSON-lines file.

    Each line of ``data_file`` is one JSON object (decoded lazily in
    ``__iter__``), so the dataset never has to fit in memory.
    """

    def __init__(self, data_file):
        self.data_file = data_file
        # Count lines once up front so len()/DataLoader sizing works.
        # Use a context manager so the handle is closed (the original
        # `sum(1 for _ in open(...))` leaked it), and pin UTF-8 so the
        # read does not depend on the platform's default encoding.
        with open(data_file, 'rt', encoding='utf-8') as f:
            self._len = sum(1 for _ in f)

    def __iter__(self):
        # Re-opened on every call, so each epoch restarts from the top.
        with open(self.data_file, 'rt', encoding='utf-8') as f:
            for line in f:
                yield json.loads(line.strip())

    def __len__(self):
        return self._len

# AFQMC train/dev splits, one JSON object per line on disk.
train_data = IterableAFQMC('./afqmc_public/train.json')
valid_data = IterableAFQMC('./afqmc_public/dev.json')

from torch.utils.data import DataLoader
from transformers import AutoTokenizer
import torch

# Chinese BERT checkpoint; the tokenizer is reused by the collate function.
checkpoint = "bert-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

def collote_fn(batch_samples):
    """Collate raw AFQMC samples into a tokenized batch.

    Tokenizes the sentence pairs together (padded/truncated to a common
    length) and returns ``(encodings, labels)`` as PyTorch tensors.
    """
    sents_a = [sample['sentence1'] for sample in batch_samples]
    sents_b = [sample['sentence2'] for sample in batch_samples]
    labels = [int(sample['label']) for sample in batch_samples]
    X = tokenizer(
        sents_a,
        sents_b,
        padding=True,
        truncation=True,
        return_tensors="pt"
    )
    y = torch.tensor(labels)
    return X, y

# NOTE(review): DataLoader over an IterableDataset — no shuffling happens,
# so samples arrive in file order every epoch.
train_dataloader = DataLoader(train_data, batch_size=batch_size, collate_fn=collote_fn)
valid_dataloader= DataLoader(valid_data, batch_size=batch_size, collate_fn=collote_fn)

from torch import nn
from transformers import BertForSequenceClassification 

model = BertForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

def train_loop(dataloader, model, loss_fn, optimizer, lr_scheduler, epoch, total_loss):
    """Run one training epoch; return the updated running loss total.

    ``total_loss`` is threaded across epochs so the printed average is
    taken over every step seen since training started, not just this
    epoch. ``epoch`` is 1-based.
    """
    finish_step_num = (epoch-1)*len(dataloader)

    model.train()
    step = 0
    for X, y in dataloader:
        step += 1
        logits = model(**X).logits
        batch_loss = loss_fn(logits, y)

        # Standard update: clear grads, backprop, step optimizer, then
        # advance the per-step LR schedule.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        lr_scheduler.step()

        total_loss += batch_loss.item()
        print(f'{total_loss} {finish_step_num} {step} loss: {total_loss/(finish_step_num + step):>7f}')
    return total_loss

def test_loop(dataloader, model, mode='Test'):
    assert mode in ['Valid', 'Test']
    size = len(dataloader.dataset)
    correct = 0

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            pred = model(**X).logits
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    correct /= size
    print(f"{mode} Accuracy: {(100*correct):>0.1f}%\n")

from torch.optim import AdamW
from transformers import get_scheduler

# One scheduler step is taken per batch, so the decay horizon spans all epochs.
num_training_steps = epoch_num * len(train_dataloader)
optimizer = AdamW(model.parameters(), lr=learning_rate)
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,  # linear decay from step 0, no warmup
    num_training_steps=num_training_steps,
)

# Cross-entropy on the raw logits produced by the model head.
loss_fn = nn.CrossEntropyLoss()

# Training driver: the running loss total carries across epochs so the
# average printed inside train_loop covers every step seen so far.
# NOTE(review): nothing is moved with .to(device) — as written this runs
# on CPU; confirm whether GPU placement was intended.
total_loss = 0.
for t in range(epoch_num):
    print(f"Epoch {t+1}/{epoch_num}\n-------------------------------")
    total_loss = train_loop(train_dataloader, model, loss_fn, optimizer, lr_scheduler, t+1, total_loss)
    test_loop(valid_dataloader, model, mode='Valid')
print("Done!")