'''
这个例子运行时会报错，原因就是AutoTokenizer的分词结果和example中的不一致。
例如，巴黎多云12℃／21℃，examples中的分词结果是['巴', '黎', '多', '云', '1', '2', '℃', '/', '2', '1', '℃']，
而AutoTokenizer的分词结果是['巴', '黎', '多', '云', '12', '##℃', '/', '21', '##℃']。
仅供学习参考。。。。。。
'''


import torch
from torch.utils.data import DataLoader, Dataset

# Fix the global RNG seed so data shuffling and weight init are reproducible.
torch.manual_seed(7)

# Set of entity categories seen while loading data; filled as a side effect
# of MyDataset.__load_data so the label maps below can be built from it.
categories = set()

class MyDataset(Dataset):
    """Character-level NER dataset.

    Reads a CoNLL-style file where each line is "<char> <label>" and a blank
    line separates sentences. Each item is a dict:
        {'sentence': str, 'labels': [[start_char, end_char, entity_text, category], ...]}
    with start/end being inclusive character offsets into 'sentence'.
    """

    def __init__(self, data_file):
        super().__init__()
        self.data = self.__load_data(data_file)

    def __load_data(self, data_file):
        Data = {}
        idx = 0
        tokenidx = 0
        sentence = ''
        labels = []
        with open(data_file, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip() == '':
                    # Blank line ends the current sentence. Guarding on
                    # `sentence` skips empty records from consecutive blanks.
                    if sentence:
                        Data[idx] = {'sentence' : sentence, 'labels' : labels}
                        idx += 1
                    sentence = ''
                    tokenidx = 0
                    labels = []
                else:
                    char, label = line.strip().split()
                    sentence += char
                    if (label.startswith('B-')):
                        ctg = label[2:]
                        labels.append([tokenidx, tokenidx, char, ctg])
                        categories.add(ctg)
                    elif (label.startswith('I-')):
                        # Extend the open entity span. A stray I- with no
                        # preceding B- (malformed data) is ignored instead of
                        # crashing on labels[-1].
                        if labels:
                            labels[-1][1] = tokenidx
                            labels[-1][2] += char
                    else:
                        categories.add(label)
                    tokenidx += 1
        # BUG FIX: flush the last sentence when the file does not end with a
        # blank line — the original silently dropped it.
        if sentence:
            Data[idx] = {'sentence' : sentence, 'labels' : labels}
        return Data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

train_data = MyDataset('example.train')
test_data = MyDataset('example.test')

# Build tag <-> id maps: id 0 is 'O', then a B-/I- pair per entity category.
# (Sorting is only needed to match the ordering used by the online tutorial.)
id2lables = {0: 'O'}
for c in sorted(categories):
    if c == 'O':
        continue
    for prefix in ('B', 'I'):
        id2lables[len(id2lables)] = f'{prefix}-{c}'
label2ids = {tag: i for i, tag in id2lables.items()}

from transformers import AutoTokenizer

# Chinese BERT checkpoint; AutoTokenizer returns the fast (Rust) tokenizer,
# which tokenizes per character but may merge digit runs (see module docstring).
checkpoint = "bert-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

from torch.utils.data import DataLoader
def collote_fn(batch_samples):
    """Collate samples into tokenized inputs and aligned token-level labels.

    BUG FIX: the original wrote character offsets straight into token
    positions (shifted by 1 for [CLS]), which misaligns whenever the
    tokenizer merges characters (e.g. '12' -> one token) — exactly the crash
    described in the module docstring. We now map character offsets to token
    offsets with BatchEncoding.char_to_token (fast tokenizers only).

    Returns (batch_inputs, batch_label) where special/padding tokens carry
    label -100 so CrossEntropyLoss ignores them.
    """
    batch_sentence, batch_tags = [], []
    for sample in batch_samples:
        batch_sentence.append(sample['sentence'])
        batch_tags.append(sample['labels'])
    batch_inputs = tokenizer(
        batch_sentence,
        padding=True,
        return_tensors="pt"
    )

    special_ids = torch.tensor(
        [tokenizer.cls_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id]
    )
    batch_label = torch.where(
        torch.isin(batch_inputs['input_ids'], special_ids), -100, 0
    )
    for i, tag in enumerate(batch_tags):
        for start, end, _, ctg in tag:
            # char_to_token returns None when the character was dropped by the
            # tokenizer (e.g. whitespace) — skip such spans rather than crash.
            tok_start = batch_inputs.char_to_token(i, start)
            tok_end = batch_inputs.char_to_token(i, end)
            if tok_start is None or tok_end is None:
                continue
            batch_label[i][tok_start] = label2ids[f'B-{ctg}']
            batch_label[i][tok_start + 1 : tok_end + 1] = label2ids[f'I-{ctg}']
    return batch_inputs, batch_label.clone()

train_dataloader = DataLoader(
    train_data,
    batch_size=4,
    shuffle=True,
    collate_fn=collote_fn,
)
test_dataloader = DataLoader(
    test_data,
    batch_size=4,
    shuffle=False,
    collate_fn=collote_fn,
)

from transformers import AutoModelForTokenClassification

# Token-classification head on top of the BERT encoder, sized to our tag set.
model = AutoModelForTokenClassification.from_pretrained(
    checkpoint,
    id2label=id2lables,
    label2id=label2ids,
)

def train_loop(dataloader, model, loss_fn, optimizer, lr_scheduler, epoch, total_loss):
    """Train for one epoch, printing a running-average loss after each batch.

    `total_loss` is the loss accumulated over earlier epochs; the updated
    accumulator is returned so the caller can thread it across epochs.
    """
    finish_batch_num = (epoch - 1) * len(dataloader)

    model.train()
    batch = 0
    for X, y in dataloader:
        batch += 1
        optimizer.zero_grad()
        outputs = model(**X)
        # CrossEntropyLoss wants (batch, classes, seq); logits come out as
        # (batch, seq, classes), hence the permute.
        loss = loss_fn(outputs.logits.permute(0, 2, 1), y)
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        total_loss += loss.item()
        print(f'{batch}/{len(dataloader)}, loss: {total_loss/(finish_batch_num + batch):>7f}')
    return total_loss

def eval_loop(dataloader, model):
    """Evaluate the model and print token-level accuracy.

    Collects predicted and gold tag sequences, skipping positions labeled
    -100 (special/padding tokens).
    """
    true_labels, true_predictions = [], []

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            pred = model(**X)
            predictions = pred.logits.argmax(dim=-1).cpu().numpy().tolist()
            labels = y.cpu().numpy().tolist()
            true_labels += [[id2lables[int(l)] for l in label if l != -100] for label in labels]
            true_predictions += [
                [id2lables[int(p)] for (p, l) in zip(prediction, label) if l != -100]
                for prediction, label in zip(predictions, labels)
            ]
    # BUG FIX: the original printed `true_predictions * 100/len(dataloader)`,
    # i.e. list * int then list / int — a guaranteed TypeError. Report the
    # fraction of correctly predicted tokens instead.
    correct = sum(
        p == l
        for preds, labs in zip(true_predictions, true_labels)
        for p, l in zip(preds, labs)
    )
    total = sum(len(labs) for labs in true_labels)
    accuracy = correct / total if total else 0.0
    print(f'accuracy: {accuracy * 100:>0.2f}%')

from torch.optim import AdamW
from transformers import get_scheduler

# Hyper-parameters and optimization setup.
learning_rate = 1e-5
epoch_num = 3
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = AdamW(model.parameters(), lr=learning_rate)
# Linear decay over the full training schedule, no warmup.
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=epoch_num * len(train_dataloader),
)

total_loss = 0.
for epoch in range(1, epoch_num + 1):
    print(f"Epoch {epoch}/{epoch_num}\n-------------------------------")
    total_loss = train_loop(
        train_dataloader, model, loss_fn, optimizer, lr_scheduler, epoch, total_loss
    )
    eval_loop(test_dataloader, model)
print("Done!")

