# encoding: utf-8
# @Time:    :2024/12/20 21:28

import pickle
import traceback
from tqdm import tqdm
import numpy as np
import pandas as pd

import torch
from torch.utils import data
from torch import nn
from transformers import AlbertModel, AlbertConfig, AlbertTokenizer, AlbertTokenizerFast
# from transformers import BertModel, BertConfig, BertTokenizer, BertTokenizerFast

# Local checkpoint path; swap in the HF hub id below to download instead.
pretrained_model = r"E:/codes/nlp_about/pretrained_model/albert-base-chinese-finetuned"
# pretrained_model = "ckiplab/albert-base-chinese"

# Prefer the fast (Rust-backed) tokenizer; if it cannot be constructed for
# this checkpoint, log the traceback and fall back to the slow Python one.
try:
    tokenizer = AlbertTokenizerFast.from_pretrained(pretrained_model)
    print("use tokenizer fast")
except Exception as e:
    print(traceback.format_exc())
    tokenizer = AlbertTokenizer.from_pretrained(pretrained_model)
    print("use tokenizer")

# Backbone weights and config loaded once at import time (module-level side effect).
bert_model = AlbertModel.from_pretrained(pretrained_model)
bert_model_config = AlbertConfig.from_pretrained(pretrained_model)

# configs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("using device:", device)
max_length = 64  # tokens per example after padding/truncation
batch_size = 256  # NOTE(review): defined but unused in this file chunk — presumably used by a training script

info_labels = []  # ordered registry of label strings; list index == integer class id
info_labels_len = 2  # num classes — NOTE(review): save_datasets() reports 1227 on the real data; update before training


def get_label(x):
    """Bidirectional label <-> id mapping backed by the global ``info_labels`` list.

    Given a string label, return its integer id, registering it on first
    sight. Given an integer id, return the corresponding label string.

    Raises:
        IndexError: if an integer id has not been registered yet.
    """
    global info_labels
    if isinstance(x, str):
        # EAFP with a single linear scan — the original did a membership
        # test followed by .index(), scanning the list twice per lookup.
        try:
            return info_labels.index(x)
        except ValueError:
            info_labels.append(x)
            return len(info_labels) - 1
    return info_labels[x]


class MyDataset(data.Dataset):
    """Text-classification dataset built from a CSV with 'sentence' and 'label' columns."""

    def __init__(self, file_path: str):
        self.datas = []   # token-id sequences, each padded/truncated to max_length
        self.labels = []  # integer class ids aligned with self.datas
        self.load_data(file_path)

    def load_data(self, file_path: str):
        """Read the CSV at *file_path* and tokenize every usable row.

        Fix: pandas represents missing cells as float NaN even with
        dtype=str; NaN is truthy, so the original ``not text`` check let it
        through and ``text.strip()`` raised AttributeError. We now require
        actual str instances before touching the values.
        """
        df = pd.read_csv(file_path, dtype=str)

        for one_data in tqdm(df.to_dict("records")):
            text = one_data.get("sentence")
            label = one_data.get("label")
            # Skip missing cells (NaN) and non-string junk outright.
            if not isinstance(text, str) or not isinstance(label, str):
                continue
            text = text.strip()
            # Skip empty/whitespace-only sentences and empty labels.
            if not text or not label:
                continue
            token_ids = tokenizer.encode(text, max_length=max_length, padding="max_length", truncation="longest_first")
            self.datas.append(token_ids)
            self.labels.append(get_label(label))

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        # Returned as numpy arrays so the default DataLoader collate stacks them.
        return np.array(self.datas[index]), np.array(self.labels[index])


class ClassificationModel(nn.Module):
    """Sentence classifier: ALBERT pooled output -> dropout -> LeakyReLU -> linear head."""

    def __init__(self, _bert_model, _bert_model_config, num_class: int, drop_out: float = 0.3):
        super().__init__()
        # Submodules registered in the same order as before so repr/state_dict match.
        self.bert_model = _bert_model
        self.dropout = nn.Dropout(drop_out)
        self.fc1 = nn.Linear(_bert_model_config.hidden_size, num_class)
        self.activate = nn.LeakyReLU()

    def forward(self, token_ids):
        """Map token ids [batch_size, seq_len] to logits [batch_size, num_class]."""
        # Index 1 of the backbone output is the pooled sentence vector
        # of shape [batch_size, hidden_size].
        pooled = self.bert_model(token_ids)[1]
        return self.fc1(self.activate(self.dropout(pooled)))


# NOTE(review): the head is sized with info_labels_len (=2 here), but the
# recorded run of save_datasets() reports 1227 classes — update
# info_labels_len to match the dataset before training, or the head is too small.
model = ClassificationModel(bert_model, bert_model_config, info_labels_len)
model.to(device)


def save_datasets():
    """Tokenize the train/dev CSVs into MyDataset objects, pickle them to
    ./datas/{train,test}.pkl, and print the number of distinct labels seen."""
    global info_labels
    # Build both datasets first (as the original did), then persist them.
    train_datasets = MyDataset("./datas/train.csv")
    test_datasets = MyDataset("./datas/dev.csv")

    for pkl_path, dataset in (("./datas/train.pkl", train_datasets),
                              ("./datas/test.pkl", test_datasets)):
        with open(pkl_path, "wb") as f:
            pickle.dump(dataset, f)

    print("num classes:", len(info_labels))
    # num classes: 1227


def load_datasets():
    """Load and return the (train, test) datasets pickled by save_datasets().

    NOTE(review): pickle.load executes arbitrary code when given a malicious
    file — only load pickles produced by this project itself.
    """
    loaded = []
    for pkl_path in ("./datas/train.pkl", "./datas/test.pkl"):
        with open(pkl_path, "rb") as f:
            loaded.append(pickle.load(f))
    return tuple(loaded)


def print_model():
    """Print the full model architecture, then the backbone's parameter dtype.

    Recorded output (from a run with a 1227-class head): ALBERT backbone with
    word embeddings 21128x128, an embedding->hidden projection 128->768, one
    shared AlbertLayerGroup (768-dim attention, 768->3072->768 FFN, ReLU),
    Tanh pooler; classification head of Dropout(0.3), fc1 Linear(768->1227),
    LeakyReLU(0.01). The dtype line prints torch.float32.
    """
    for item in (model, model.bert_model.dtype):
        print(item)


if __name__ == "__main__":
    # One-off preprocessing step: tokenize the CSVs and cache them as pickles.
    save_datasets()
    # print_model()
