import random
from transformers import AutoTokenizer, default_data_collator, AutoModelForSequenceClassification, DistilBertModel, get_scheduler
from datasets import load_from_disk, load_dataset
import torch.utils.data
import dltools

# Load the DistilBERT tokenizer once at module level; it is reused by
# collate_fn below. Vocab files are cached under ./MNIST/cache.
tokenizer = AutoTokenizer.from_pretrained(
    "distilbert-base-uncased",
    use_fast=True,
    cache_dir="./MNIST/cache",
    force_download=False)
print(f"tokenizer:{tokenizer}")
# HF_ENDPOINT=https://hf-mirror.com
# Smoke test: encode one sentence to verify the tokenizer works.
res = tokenizer.batch_encode_plus(["Web text classification is one of the most important research topics"])
print(f"res:{res}")
# Kept for reference: the same data could be pulled from the HF hub
# instead of the local CSV file used by ColaDataset below.
"""
dataset = load_dataset(
    path="glue",
    name="cola",
    cache_dir="./MNIST/cache"
)
print(f"dataset:{dataset}")
"""


class ColaDataset(torch.utils.data.Dataset):
    """CoLA acceptability dataset read from a local CSV export.

    Splits the file 90/10 into train/test. The shuffle uses a fixed
    seed so the train instance and the test instance apply the SAME
    permutation — with the original unseeded ``random.shuffle`` each
    instantiation reshuffled independently, so the "test" 10% could
    overlap the training 90% (data leakage).
    """

    def __init__(self, is_test=False, seed=42):
        """Load and split the CSV.

        :param is_test: False -> first 90% of shuffled rows, True -> last 10%.
        :param seed: shuffle seed; must match between the train and test
                     instances for the split to be disjoint.
        """
        super().__init__()
        # (renamed handle: the original shadowed the stdlib `csv` module)
        with open("./MNIST/cola/CoLA.csv", "r", encoding="utf-8") as f:
            lines = f.readlines()[1:]  # skip header row
        # Deterministic shuffle via a private Random instance; does not
        # disturb the global random state.
        random.Random(seed).shuffle(lines)
        split_index = int(len(lines) * 0.9)
        lines = lines[split_index:] if is_test else lines[:split_index]
        self.examples = []
        for line in lines:
            # Naive comma split: column 2 = label, column 4 = sentence.
            # Assumes the sentence column contains no quoted commas —
            # TODO confirm against the actual CSV export.
            cells = line.split(",")
            text = cells[4].strip().replace("\"", "")
            self.examples.append({"text": text, "label": int(cells[2].strip())})

    def __getitem__(self, item):
        """Return the example dict {"text": str, "label": int} at index `item`."""
        return self.examples[item]

    def __len__(self):
        """Number of examples in this split."""
        return len(self.examples)


# Training split (is_test=False -> the 90% portion of the shuffled rows).
dataset = ColaDataset()
print(f"dataset[0]:{dataset[0]}")


def collate_fn(batch):
    """Collate a list of {"text", "label"} examples into one model-ready batch.

    Tokenizes all texts together (truncated, padded to the longest in the
    batch, returned as PyTorch tensors) and attaches the labels under the
    "labels" key expected by the model's forward pass.
    """
    texts = [example["text"] for example in batch]
    encoded = tokenizer.batch_encode_plus(
        texts,
        truncation=True,
        padding=True,
        return_tensors="pt",
    )
    encoded["labels"] = torch.tensor([example["label"] for example in batch])
    return encoded


# Training DataLoader over the 90% split; collate_fn pads each batch of 8.
train_loader = torch.utils.data.DataLoader(
    dataset=dataset,
    batch_size=8,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True
)

# Sanity check: print one collated batch and stop.
for data in train_loader:
    print(f"data:{data}")
    break


class TextClassificationModel(torch.nn.Module):
    """DistilBERT encoder + 2-layer classification head for CoLA (2 labels).

    The head weights are copied from the ``pre_classifier``/``classifier``
    layers of the stock sequence-classification checkpoint, so training
    starts from the same initialization as
    ``AutoModelForSequenceClassification``.
    """

    def __init__(self):
        super().__init__()
        self.pretrained = DistilBertModel.from_pretrained("distilbert-base-uncased",
                                                          cache_dir="./MNIST/cache",
                                                          force_download=False)
        # 768 -> 768 -> ReLU -> Dropout -> 2, mirroring the HF head layout.
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(768, 768),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.1),
            torch.nn.Linear(768, 2)
        )
        # Reference model loaded only to borrow its head weights.
        # cache_dir added for consistency with the other from_pretrained
        # calls in this file (avoids a second download into the default
        # HF cache directory).
        pretrained_parameters = AutoModelForSequenceClassification.from_pretrained(
            "distilbert-base-uncased",
            num_labels=2,
            cache_dir="./MNIST/cache")
        self.fc[0].load_state_dict(pretrained_parameters.pre_classifier.state_dict())
        self.fc[3].load_state_dict(pretrained_parameters.classifier.state_dict())
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def forward(self, input_ids, attention_mask, labels=None):
        """Run a forward pass.

        :param input_ids: token id tensor from the tokenizer.
        :param attention_mask: matching attention mask tensor.
        :param labels: optional int class labels; when given, loss is computed.
        :return: {"loss": Tensor | None, "logits": (batch, 2) Tensor}
        """
        hidden = self.pretrained(input_ids=input_ids, attention_mask=attention_mask)
        # Keep only the first ([CLS]) position: self-attention lets every
        # position (and so the first one) summarize the whole sentence.
        logits = self.fc(hidden.last_hidden_state[:, 0])
        loss = self.loss_fn(logits, labels) if labels is not None else None
        return {"loss": loss, "logits": logits}


model = TextClassificationModel()
# The f-string text is user-facing output; "参数量" means "parameter count".
print(f"参数量:{sum(i.numel() for i in model.parameters())}")

device = dltools.try_gpu()

# Sanity check: one forward pass on a single batch (model still on CPU here).
for data in train_loader:
    out = model(**data)
    print(f"out:{out}")
    break


def _test(model: torch.nn.Module):
    """Evaluate `model` on the held-out 10% CoLA split and print accuracy.

    Side effects: moves the model to `device` and leaves it in eval mode.
    """
    model.to(device)
    model.eval()
    test_loader = torch.utils.data.DataLoader(
        dataset=ColaDataset(is_test=True),
        batch_size=8,
        collate_fn=collate_fn,
        shuffle=False,
        drop_last=False
    )

    preds = []
    labels = []
    for i, data in enumerate(test_loader):
        with torch.no_grad():
            input_ids = data["input_ids"].to(device)
            attention_mask = data["attention_mask"].to(device)
            out = model(input_ids, attention_mask)
        # Collect plain Python ints: building torch.tensor() from a list of
        # 0-dim tensors (as the original did) is slow and warns in recent
        # PyTorch versions.
        preds.extend(out["logits"].argmax(dim=1).cpu().tolist())
        labels.extend(data["labels"].tolist())
        if i % 10 == 0:
            print(i)  # coarse progress indicator
    correct = sum(p == t for p, t in zip(preds, labels))
    accuracy = correct / len(labels)
    print(f"accuracy:{accuracy:.3f}")

_test(model)


def train(model: torch.nn.Module):
    """Fine-tune `model` for one pass over `train_loader`.

    Uses AdamW with a linear LR schedule (10 warmup steps) sized to one
    epoch. Side effects: moves the model to `device`, puts it in train mode.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    scheduler = get_scheduler(name="linear",
                              num_warmup_steps=10,
                              num_training_steps=len(train_loader),
                              optimizer=optimizer
                              )
    model = model.to(device)
    model.train()
    for i, data in enumerate(train_loader):
        input_ids = data["input_ids"].to(device)
        attention_mask = data["attention_mask"].to(device)
        labels = data["labels"].to(device)
        # optimizer.zero_grad() already clears the gradients of every model
        # parameter; the original's extra model.zero_grad() was redundant.
        optimizer.zero_grad()
        out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        loss = out["loss"]
        loss.backward()

        optimizer.step()
        # scheduler.step() must come after optimizer.step(), otherwise
        # PyTorch emits a warning and skips the first LR value.
        scheduler.step()
        if i % 50 == 0:
            preds = out["logits"].argmax(dim=1)
            accuracy = (labels == preds).sum().item() / len(labels)
            print(f"{i} loss:{loss} accuracy:{accuracy}")

train(model)  # one epoch of fine-tuning
_test(model)  # accuracy after fine-tuning