import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from transformers import BertTokenizer
from nlpx.tokenize.utils import get_df_text_labels
from nlpx.dataset import TextDataset
from transformers_model import ErnieCNNTokenClassifier, AutoCNNTokenClassifier,BertDataset, BertCollator, \
    BertTokenizeCollator
from nlpx.model.wrapper import ClassifyModelWrapper

# Name or local path of the pretrained Hugging Face checkpoint to fine-tune.
pretrained_path = "albert_chinese_tiny"
# Alternative checkpoint used previously (kept for reference):
# pretrained_path = "/Users/summy/project/python/parttime/33/ernie-3.0-base-zh"
# CSV of incident records; read with encoding="GBK" below, so presumably a
# Chinese-Windows export — confirm if the data source changes.
file = "~/project/python/parttime/归档/text_gcn/data/北方地区不安全事件统计20240331.csv"


if __name__ == '__main__':
    tokenizer = BertTokenizer.from_pretrained(pretrained_path)
    # The source CSV is GBK-encoded (see note on `file` above).
    df = pd.read_csv(file, encoding="GBK")
    # Extract parallel lists of texts and integer labels plus the class names.
    texts, labels, classes = get_df_text_labels(
        df, text_col="故障描述", label_col="故障标志"
    )

    # Fixed seed so both training variants below see the same split.
    train_texts, test_texts, y_train, y_test = train_test_split(
        texts, labels, test_size=0.2, random_state=42
    )

    # Single source of truth for the sequence length (was duplicated inline).
    max_length = 66

    def encode(batch_texts):
        """Tokenize a batch of texts into padded, fixed-length PyTorch tensors.

        Returns the BatchEncoding with input_ids, token_type_ids and
        attention_mask, each of shape (len(batch_texts), max_length).
        """
        return tokenizer.batch_encode_plus(
            batch_texts,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_token_type_ids=True,
            return_attention_mask=True,
            return_tensors="pt",
        )

    ###################################################################################################################
    # Variant 1: tokenize up front, feed pre-encoded tensors through BertCollator.
    train_set = BertDataset(encode(train_texts), y_train)
    test_set = BertDataset(encode(test_texts), y_test)

    model = AutoCNNTokenClassifier(pretrained_path, len(classes))
    wrapper = ClassifyModelWrapper(model, classes)
    _ = wrapper.train(train_set, test_set, collate_fn=BertCollator(), early_stopping_rounds=3)
    ###################################################################################################################

    # Variant 2: keep raw text in the dataset and tokenize lazily in the collator.
    train_set = TextDataset(train_texts, y_train)
    test_set = TextDataset(test_texts, y_test)
    model = AutoCNNTokenClassifier(pretrained_path, len(classes))
    wrapper = ClassifyModelWrapper(model, classes)
    _ = wrapper.train(train_set, test_set, collate_fn=BertTokenizeCollator(tokenizer))
