# BBC article classification
# Dataset: https://www.kaggle.com/competitions/learn-ai-bbc/
# Code reference: https://mp.weixin.qq.com/s/00on_zUFjAmnoSb_8j0QMw

import torch
from torch import nn
from transformers import BertModel, AutoTokenizer
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import random_split
import pandas as pd
from torch.optim import Adam
from tqdm import tqdm
import time

import sys
sys.path.append('..')
from settings import BERT_PATH

from transformers import BertForSequenceClassification
from static_var import bert_cls_path

import pandas as pd
import wandb

# Pick the GPU when available; everything is moved to this device later.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyper-parameters and paths for this fine-tuning run.
config = {
    "batch": 16,                    # mini-batch size
    "epochs": 1,                    # number of training epochs
    "device": device,
    "dataset": "...csv",            # NOTE(review): placeholder — set the real CSV path
    "model_path": BERT_PATH,        # pretrained BERT checkpoint directory
    "new_model": "wuhan_cls.pth",   # file the fine-tuned weights are saved to
    "output": 88,                   # number of target classes
    "warmup_steps": 500,            # linear warmup steps for the LR scheduler
}
# (Removed the redundant `device = config["device"]` — it re-assigned the
# exact value stored two lines above.)
tokenizer = AutoTokenizer.from_pretrained(config["model_path"])


class CSVDataset(Dataset):
    """Dataset backed by a CSV file, yielding (raw text, label tensor) pairs.

    Tokenization is intentionally deferred to the DataLoader's collate_fn so
    padding/truncation happens once per batch rather than per sample.

    Args:
        csv_file: path to a CSV readable by pandas.
        feature_column: name of the column holding the input text.
        target_column: name of the column holding the integer class id.
        tokenizer: kept for API compatibility; not used by this class.
    """

    def __init__(self, csv_file, feature_column, target_column, tokenizer):
        self.data = pd.read_csv(csv_file)
        self.target_column = target_column
        self.feature_column = feature_column
        self.tokenizer = tokenizer  # unused here; tokenization is done in collate_fn
        self.targets = self.data[target_column].values
        # Materialize features as a plain array so __getitem__ is purely
        # positional and never depends on the DataFrame's index labels
        # (the previous `.loc[idx, ...]` silently assumed a RangeIndex).
        self.features = self.data[feature_column].values
        self.data = self.data.drop(columns=[target_column])

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        sample = self.features[idx]
        # Classification labels must be long integers for CrossEntropyLoss.
        target = torch.tensor(self.targets[idx], dtype=torch.long)

        return sample, target


# def split_dataset(dataset):
#     trainset, testset = random_split(dataset, [0.95, 0.05])
#     return trainset, testset


# Build the training dataset from the configured CSV.
# NOTE(review): config["dataset"] is the "...csv" placeholder — confirm the
# real file path before running.
csv_file = config["dataset"]
feature_column = "text"  # raw article text
target_column = "num_label"  # integer class id
dataset = CSVDataset(csv_file, feature_column, target_column, tokenizer)
# No train/validation split is performed; the whole CSV is used for training
# (the split_dataset helper above is commented out).
train_dataset = dataset


def collate_fn(item):
    """Collate (text, label) pairs into one padded, tokenized batch.

    Args:
        item: list of (raw text, label tensor) pairs from CSVDataset.

    Returns:
        (encodings, labels): the tokenizer's BatchEncoding with
        (batch, 512)-shaped tensors, and a 1-D long tensor of labels.
    """
    feature, label = zip(*item)
    # zip() yields a tuple, but the tokenizer's batch API expects a list of
    # strings — convert explicitly.
    feature = tokenizer(
        list(feature),
        padding="max_length",
        max_length=512,
        truncation=True,
        return_tensors="pt",
    )
    label = torch.stack(label)
    return feature, label


# Training DataLoader: shuffles every epoch, tokenizes per batch via
# collate_fn, and pins host memory to speed up host-to-GPU transfers.
train_dataloader = DataLoader(
    train_dataset,
    batch_size=config["batch"],
    shuffle=True,
    collate_fn=collate_fn,
    pin_memory=True,
)


# Pretrained BERT encoder with a freshly initialized classification head.
model = BertForSequenceClassification.from_pretrained(
    config["model_path"],
    num_labels=config["output"],  # number of target classes
    output_attentions=False,  # do not return attention weights
    output_hidden_states=False,  # do not return all hidden states
)
# Move to the configured device instead of unconditional .cuda(), so the
# script also runs on CPU-only machines (where .cuda() would raise).
model.to(device)

from transformers import AdamW, get_linear_schedule_with_warmup

# AdamW, as in the original BERT fine-tuning recipe (run_glue.py).
optimizer = AdamW(
    model.parameters(),
    lr=2e-5,  # run_glue.py default is 5e-5; 2e-5 is a common fine-tuning LR
    eps=1e-8,  # numerical-stability epsilon (AdamW default)
)

# The linear scheduler needs the TOTAL number of optimizer steps across ALL
# epochs; using only len(train_dataloader) would decay the LR to zero after
# the first epoch whenever config["epochs"] > 1.
total_steps = len(train_dataloader) * config["epochs"]

# Warm up linearly for `warmup_steps`, then decay linearly to zero.
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=config["warmup_steps"],
    num_training_steps=total_steps,
)


def run_one_epoch(
    model,
    dataloader,
    is_train,
    optimizer,
    scheduler,
    wandb,
    batch=config["batch"],
):
    """Run one full pass over `dataloader` and report mean loss and accuracy.

    Args:
        model: sequence-classification model whose forward returns a dict
            with "loss" and "logits" (e.g. BertForSequenceClassification).
        dataloader: yields (tokenizer-encoding dict, label tensor) batches.
        is_train: True → train mode with backprop and optimizer/scheduler
            steps; False → eval mode under torch.no_grad().
        optimizer, scheduler: stepped only when is_train is True
            (scheduler.get_last_lr() is still read for logging in eval).
        wandb: run object; per-batch acc/loss/lr are logged to it.
        batch: kept for backward compatibility; averages now use the actual
            sample count, so a partial final batch no longer skews them.

    Returns:
        (mean loss per batch, accuracy over all samples).
    """

    def run():
        start = time.time()
        total_loss = 0.0
        total_acc = 0
        seen = 0  # actual samples processed; the last batch may be partial
        for feature, label in tqdm(dataloader):
            label = label.to(device)
            if is_train:
                optimizer.zero_grad()
            # `inputs` (not `input`, which shadows the builtin): move every
            # encoding tensor to the device; squeeze(1) drops a singleton
            # dim the tokenizer output may carry.
            inputs = {k: v.squeeze(1).to(device) for k, v in feature.items()}
            output = model(**inputs, labels=label)
            loss = output["loss"]
            logits = output["logits"]
            if is_train:
                loss.backward()
                # Clip gradients to stabilize fine-tuning (BERT recipe).
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                scheduler.step()
            total_loss += loss.item()
            acc = (logits.argmax(dim=-1) == label.view(-1)).sum().item()
            wandb.log(
                {"acc": acc, "loss": loss.item(), "lr": scheduler.get_last_lr()[0]}
            )
            total_acc += acc
            seen += label.size(0)

        end = time.time()
        print("耗费时间：", end - start, "秒")
        # Previously both averages divided by len(dataloader) * batch, which
        # overcounts when the final batch is partial (and mixes a sum of
        # per-batch mean losses with a per-sample denominator). Average the
        # loss per batch and the accuracy per actual sample instead.
        num_batches = max(len(dataloader), 1)
        return total_loss / num_batches, total_acc / max(seen, 1)

    if is_train:
        model.train()
        return run()

    model.eval()
    with torch.no_grad():
        return run()


def train(
    model,
    optimizer,
    scheduler,
    wandb,
    epochs=config["epochs"],
):
    """Fine-tune `model` for `epochs` epochs over the global train_dataloader.

    After each epoch, prints the training loss/accuracy and checkpoints the
    model weights to config["new_model"] whenever the loss reaches a new low.
    """
    best_loss = float("inf")
    for epoch in range(epochs):
        epoch_loss, epoch_acc = run_one_epoch(
            model, train_dataloader, True, optimizer, scheduler, wandb
        )
        print(f"Epoch: {epoch + 1}")
        print(f"Train loss: {epoch_loss:.4f}, Train acc: {epoch_acc:.4f}")

        if epoch_loss < best_loss:
            best_loss = epoch_loss
            print(epoch, "save model")
            torch.save(model.state_dict(), config["new_model"])


# Start a new wandb run to track this script.
wandb.init(
    # The wandb project this run is logged under.
    project="pku_industry",
    # Log the hyper-parameters ACTUALLY used: the previous metadata
    # hard-coded "epochs": 10 while training ran config["epochs"].
    config={
        "architecture": "BERT",
        "epochs": config["epochs"],
        "batch": config["batch"],
        "num_labels": config["output"],
        "warmup_steps": config["warmup_steps"],
    },
)
train(model, optimizer, scheduler, wandb)
wandb.finish()