# from transformers import BertTokenizer
from datasets import load_dataset
from datasets import load_from_disk
import os

import tools
from tools import get_tokenizer
import torch
# from datasets import load_dataset
from datasets import load_from_disk
from tools import WithTimer
from bdtime import tt


#定义数据集
#定义数据集 (dataset definition)
class Dataset(torch.utils.data.Dataset):
    """ChnSentiCorp sentiment dataset read from a local on-disk copy.

    Expects ``./data/ChnSentiCorp/<split>`` to contain a dataset previously
    saved with ``datasets.Dataset.save_to_disk``. Each item is a raw
    ``(text, label)`` pair; tokenization is deferred to the collate function.
    """

    def __init__(self, split):
        # Original remote download, kept for reference:
        # self.dataset = load_dataset(path='lansinuote/ChnSentiCorp', split=split)
        self._data_dist_path = './data/ChnSentiCorp'
        self.data_dist_path = os.path.join(self._data_dist_path, split)
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would turn a missing-data error into a confusing failure later.
        if not os.path.exists(self.data_dist_path):
            raise FileNotFoundError(f'路径不存在? data path: {self.data_dist_path}')
        self.dataset = load_from_disk(self.data_dist_path)

    def __len__(self):
        # Number of examples in the chosen split.
        return len(self.dataset)

    def __getitem__(self, i):
        # Return the untokenized (text, label) pair for example i.
        record = self.dataset[i]
        return record['text'], record['label']


# 定义下游任务模型
# 定义下游任务模型 (downstream-task model)
class Model(torch.nn.Module):
    """Downstream classifier: frozen BERT encoder + a single linear head.

    Relies on the module-level ``pretrained`` BERT model created in the
    training script; its parameters are frozen there, and the ``no_grad``
    block below additionally avoids building a graph through the encoder.

    Returns raw logits of shape ``(batch, 2)`` — suitable for
    ``torch.nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super().__init__()
        # 768 = BERT-base hidden size; 2 = binary sentiment classes.
        self.fc = torch.nn.Linear(768, 2)

    def forward(self, input_ids, attention_mask, token_type_ids):
        # The encoder is frozen, so skip autograd bookkeeping for it.
        with torch.no_grad():
            out = pretrained(input_ids=input_ids,
                             attention_mask=attention_mask,
                             token_type_ids=token_type_ids)

        # Classify from the [CLS] token representation.
        # BUGFIX: return raw logits. The original applied softmax here and
        # then fed the result to CrossEntropyLoss, which applies log_softmax
        # internally — the double softmax flattens gradients and slows
        # training. argmax-based accuracy downstream is unchanged.
        return self.fc(out.last_hidden_state[:, 0])


if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('device=', device)

    dataset = Dataset('train')

    token = get_tokenizer()


    def collate_fn(data):
        """Tokenize a batch of (text, label) pairs and move tensors to `device`."""
        sents = [i[0] for i in data]
        labels = [i[1] for i in data]

        # Encode the whole batch at once, padded/truncated to 500 tokens.
        data = token.batch_encode_plus(batch_text_or_text_pairs=sents,
                                       truncation=True,
                                       padding='max_length',
                                       max_length=500,
                                       return_tensors='pt',
                                       return_length=True)

        # input_ids: token ids after encoding
        # attention_mask: 0 at padded positions, 1 elsewhere
        input_ids = data['input_ids'].to(device)
        attention_mask = data['attention_mask'].to(device)
        token_type_ids = data['token_type_ids'].to(device)
        labels = torch.LongTensor(labels).to(device)

        return input_ids, attention_mask, token_type_ids, labels


    # Data loader
    loader = torch.utils.data.DataLoader(dataset=dataset,
                                         batch_size=16,
                                         collate_fn=collate_fn,
                                         shuffle=True,
                                         drop_last=True)

    # Grab a single batch for the shape sanity checks below.
    for i, (input_ids, attention_mask, token_type_ids,
            labels) in enumerate(loader):
        break

    print('--- len(loader):', len(loader))
    print('shapes for [input_ids, attention_mask, token_type_ids, labels]:\n', input_ids.shape, attention_mask.shape, token_type_ids.shape, labels)

    from tools import get_pretrained_model__bert

    # Load the pretrained BERT encoder.
    pretrained = get_pretrained_model__bert()
    pretrained.to(device)

    # The encoder is not trained — freeze all of its parameters.
    for param in pretrained.parameters():
        param.requires_grad_(False)

    with WithTimer("模型试算", tt):
        # Dry-run the encoder on the sample batch.
        out = pretrained(input_ids=input_ids,
                         attention_mask=attention_mask,
                         token_type_ids=token_type_ids)

        print('input_ids.shape:', input_ids.shape)
        print('last_hidden_shape:', out.last_hidden_state.shape)

    model = Model()
    model.to(device)

    with WithTimer("计算模型输出shape", tt):
        shape_model = model(input_ids=input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids).shape

        print('shape_model:', shape_model)

    tt.__init__()
    # Training: only the classification head has trainable parameters.
    # BUGFIX: use torch.optim.AdamW — `from transformers import AdamW` has
    # been deprecated for a long time and is removed in recent transformers
    # releases; torch.optim.AdamW is the documented replacement.
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    max_times = 100
    model.train()
    for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(loader):
        out = model(input_ids=input_ids,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids)

        loss = criterion(out, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if i % 5 == 0:
            # Batch accuracy from the argmax prediction.
            pred = out.argmax(dim=1)
            accuracy = (pred == labels).sum().item() / len(labels)

            print('i, loss.item(), accuracy:', i, round(loss.item(), 3), round(accuracy, 3), '--- mean_cost:', round(tt.now() / (i + 1), 3), ', now:', tt.now())

        if i == max_times:
            break

    print(f'max_times: {max_times}, total cost time:', tt.now())







