from datasets import load_dataset, load_from_disk
from transformers import BertTokenizer, BertModel, get_scheduler
import torch
import torch.utils.data

# HF_ENDPOINT=https://hf-mirror.com
# Load the Chinese BERT tokenizer from the local cache (downloads on first run).
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', cache_dir='./MNIST/cache', force_download=False)
print(f'tokenizer:{tokenizer}')

# Demo: encode two sentence pairs, truncated/padded to a fixed length of 512.
sentence_pairs = [
    ('11月4日凌晨，神舟十八号载人飞船返回舱在东风着陆场成功着陆，三名航天员安全返回。',
     '从神舟五号到神舟十八号，每一次凯旋，航天英雄们都在返回舱前留下了振奋人心的话语！'),
    ('感动！骄傲！从神五到神十八，', '历次航天员“回家”心声回顾！')
]
out = tokenizer.batch_encode_plus(
    batch_text_or_text_pairs=sentence_pairs,
    truncation=True,
    padding='max_length',
    max_length=512,
    return_tensors='pt',
    return_length=True
)

# Dump every returned field (input_ids, token_type_ids, attention_mask, length).
for k, v in out.items():
    print(f'{k}:{v}')

# Round-trip check: decode the first encoded example back to text.
print(tokenizer.decode(out['input_ids'][0]))


class CCDataset(torch.utils.data.Dataset):
    """ChnSentiCorp sentiment dataset loaded from a local HF `datasets` dump.

    Each item is a ``(text, label)`` pair taken from the requested split.
    """

    def __init__(self, split):
        # split: a split name of the saved dataset, e.g. 'train' or 'test'.
        self.dataset = load_from_disk('./MNIST/ChnSentiCorp')[split]

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        # Fetch the row once instead of indexing the backing dataset twice
        # (the original did self.dataset[item] for 'text' and again for 'label').
        sample = self.dataset[item]
        return sample['text'], sample['label']


# Smoke-test the dataset wrapper: report its size and one raw sample.
dataset = CCDataset('train')
print(f'{len(dataset)}')
print(f'dataset[1]:{dataset[1]}')

# Pick the compute device; collate_fn below moves every batch tensor here.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'device:{device}')


def collate_fn(data):
    """Collate a list of (text, label) pairs into padded BERT input tensors.

    Returns (input_ids, attention_mask, token_type_ids, labels), all moved
    to the module-level `device`.
    """
    texts = [pair[0] for pair in data]
    labels = [pair[1] for pair in data]
    encoded = tokenizer.batch_encode_plus(
        batch_text_or_text_pairs=texts,
        truncation=True,
        padding='max_length',
        max_length=512,
        return_tensors='pt',
        return_length=True
    )
    # token_type_ids: 0 marks the first sentence, 1 the second.
    return (
        encoded['input_ids'].to(device),
        encoded['attention_mask'].to(device),
        encoded['token_type_ids'].to(device),
        torch.LongTensor(labels).to(device),
    )


# Sanity-check the collate function on a tiny hand-made batch.
data = [
    ('神舟十八号载人飞船返回舱在东风着陆场成功着陆', 1),
    ('航天英雄们都在返回舱前留下了振奋人心的话语', 0)
]
print(f'collate_fn(data):{collate_fn(data)}')

train_loader = torch.utils.data.DataLoader(
    dataset=dataset,
    batch_size=8,
    shuffle=True,  # training loader: shuffle samples each epoch (was unshuffled)
    collate_fn=collate_fn,
    drop_last=True
)
print(f'len(train_loader):{len(train_loader)}')

# Peek at a single batch to confirm the tensor shapes.
for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(train_loader):
    print(f'input_ids.shape:{input_ids.shape}')
    print(f'attention_mask.shape:{attention_mask.shape}')
    print(f'token_type_ids.shape:{token_type_ids.shape}')
    print(f'labels.shape:{labels.shape}')
    break


class CCModel(torch.nn.Module):
    """Sentence classifier: a frozen Chinese BERT encoder plus a 2-way linear head."""

    def __init__(self):
        super().__init__()
        self.bert_model = BertModel.from_pretrained('bert-base-chinese', cache_dir='./MNIST/cache', force_download=False)
        # Freeze the whole encoder; only the linear head below is trained.
        # (Module.requires_grad_(False) sets requires_grad on every parameter,
        # equivalent to looping over bert_model.parameters().)
        self.bert_model.requires_grad_(False)
        self.fc = torch.nn.Linear(in_features=768, out_features=2)

    def forward(self, input_ids, attention_mask, token_type_ids):
        # The encoder is frozen, so skip building its autograd graph entirely.
        with torch.no_grad():
            bert_out = self.bert_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # Classify from the first ([CLS]) position, which after self-attention
        # summarizes the whole input sequence.
        cls_state = bert_out.last_hidden_state[:, 0]
        return self.fc(cls_state)

# Move the model to the same device collate_fn puts the batches on.
# Without .to(device) the frozen BERT weights stay on CPU while the inputs
# are CUDA tensors on a GPU machine, crashing the forward pass.
model = CCModel().to(device)
for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(train_loader):
    out = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
    print(f'out:{out}')
    break


def train(model: CCModel):
    """Run a short demo fine-tuning loop over the classification head.

    Only the linear head learns (the encoder is frozen inside CCModel).
    Uses the module-level `train_loader`; stops after ~11 batches.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
    loss_fn = torch.nn.CrossEntropyLoss()
    scheduler = get_scheduler(
        name='linear',
        num_warmup_steps=0,
        num_training_steps=len(train_loader),
        optimizer=optimizer
    )
    model.train()
    for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(train_loader):
        # The original also called model.zero_grad(); that clears exactly the
        # same gradients as optimizer.zero_grad() here, so one call suffices.
        optimizer.zero_grad()
        out = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        loss = loss_fn(out, labels)
        loss.backward()
        optimizer.step()
        scheduler.step()
        if i % 10 == 0:
            preds = out.argmax(dim=1)
            accuracy = (preds == labels).sum().item() / len(labels)
            # Read the lr directly; rebuilding the whole state_dict for it
            # (as the original did) is needless work for the same value.
            lr = optimizer.param_groups[0]['lr']
            print(f'{i} loss:{loss:.3f} accuracy:{accuracy} lr:{lr}')

        if i > 10:  # demo only: bail out after a handful of batches
            break

train(model)


def test(model: CCModel):
    """Estimate accuracy on the first 5 batches of the test split."""
    test_loader = torch.utils.data.DataLoader(
        dataset=CCDataset('test'),
        collate_fn=collate_fn,
        batch_size=8,
        shuffle=False,
        drop_last=False
    )
    model.eval()
    correct = 0
    total = 0
    for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(test_loader):
        if i == 5:  # quick check only: sample the first 5 batches
            break
        with torch.no_grad():  # inference only; no gradients needed
            out = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        preds = out.argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += len(labels)
    if total == 0:
        # Guard: an empty test split would otherwise raise ZeroDivisionError.
        print('accuracy:n/a (no test samples)')
    else:
        print(f'accuracy:{correct/total:.3f}')

test(model)