import numpy as np
import pandas as pd
import torch
from torch import nn
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertConfig, AutoModel, ErnieConfig
from nlpx.tokenize import PaddingTokenizer
from nlpx.model import TextCNN
from nlpx.dataset import (
    TokenDataset,
    PaddingTokenCollator,
    TextDFDataset,
    TokenizeCollator,
)
from nlpx.model.classifier import RNNAttentionClassifier, TextCNNClassifier
from nlpx.model.wrapper import ClassifyModelWrapper
from transformers import BertTokenizer
from nlpx.tokenize.utils import convert_labels, get_text_length, get_df_text_labels


class BertDataset(Dataset):
    """Map-style Dataset over pre-tokenized BERT/ERNIE inputs.

    Args:
        tokenizies: tokenizer output mapping field name (e.g. ``input_ids``,
            ``attention_mask``) to a stacked tensor whose first dimension is
            the sample index.
        labels: per-sample integer labels; its length defines the dataset size.

    Each item is a tuple ``(per-sample field dict, label)``.
    """

    def __init__(self, tokenizies: dict[str, torch.Tensor], labels: np.ndarray):
        super().__init__()
        self.tokenizies = tokenizies
        self.labels = labels

    def __getitem__(self, index: int):
        # Slice every tokenizer field at the same row to form one sample.
        sample = {field: tensor[index] for field, tensor in self.tokenizies.items()}
        return sample, self.labels[index]

    def __len__(self):
        return len(self.labels)


class BertCollator:
    """Collate ``(tokenizer-field dict, label)`` pairs into a batch.

    Intended as a ``DataLoader`` ``collate_fn``: stacks each tokenizer field
    (e.g. ``input_ids``) along a new leading batch dimension and converts the
    labels to a ``LongTensor``.
    """

    def __call__(self, examples):
        """Collate a non-empty list of ``(dict[str, Tensor], label)`` pairs.

        Returns:
            Tuple of (dict mapping field name -> stacked ``(batch, ...)``
            tensor, 1-D ``torch.long`` label tensor).
        """
        tokenizes, labels = zip(*examples)
        # Stack per-sample tensors field-by-field; assumes every sample dict
        # has the same keys (true for batch_encode_plus output).
        batched = {
            key: torch.stack([sample[key] for sample in tokenizes])
            for key in tokenizes[0]
        }
        return batched, torch.tensor(labels, dtype=torch.long)


if __name__ == "__main__":
    # Smoke test: tokenize a small sample of a CSV with an ERNIE tokenizer,
    # then verify that BertDataset + BertCollator produce batched tensors.
    pretrained_path = "/Users/summy/project/python/parttime/33/ernie-3.0-base-zh"
    file = "~/project/python/parttime/归档/text_gcn/data/北方地区不安全事件统计20240331.csv"
    df = pd.read_csv(file, encoding="GBK")
    # NOTE(review): assumes get_df_text_labels returns (texts, labels, classes)
    # with labels as an integer array — confirm against nlpx docs.
    texts, labels, classes = get_df_text_labels(
        df, text_col="故障描述", label_col="故障标志"
    )

    tokenizer = BertTokenizer.from_pretrained(pretrained_path)
    # Encode only the first 10 texts: this is a quick sanity check, not training.
    tokenizies = tokenizer.batch_encode_plus(
        texts[:10],
        max_length=68,
        padding="max_length",
        truncation=True,
        return_token_type_ids=True,
        return_attention_mask=True,
        return_tensors="pt",
    )

    dataset = BertDataset(tokenizies, labels[:10])
    dataloader = DataLoader(dataset, batch_size=4, collate_fn=BertCollator())

    # Print the first collated batch to eyeball the stacked field tensors.
    for t, label in dataloader:
        print(t)
        break
    # Print one raw tokenizer field's shape (should be [10, 68]).
    for k, v in tokenizies.items():
        print(k, v.shape)
        break