import numpy as np
import pandas as pd
import polars as pl
import torch
from torch import nn
from sklearn.model_selection import train_test_split
from nlpx.model.classifier import TextCNNClassifier
from model_wrapper import ModelWrapper, SimpleModelWrapper, SplitModelWrapper, ClassifyModelWrapper, SimpleClassifyModelWrapper, SplitClassifyModelWrapper
from nlpx.tokenize import PaddingTokenizer
from nlpx.dataset import TokenDataset, PaddingTokenCollator
from nlpx.tokenize.utils import get_df_text_labels


if __name__ == '__main__':
	# Experiment driver: train/evaluate a TextCNN classifier on an incident
	# statistics CSV two ways — (1) pre-split arrays through the "simple"
	# wrapper API and (2) Dataset objects through the collator-based API.

	# Load the GBK-encoded CSV and pull out text/label columns plus the class set.
	df = pl.read_csv('/Users/summy/project/python/parttime/归档/text_gcn/data/北方地区不安全事件统计20240331.csv', encoding='GBK')
	texts, labels, classes = get_df_text_labels(df, text_col='故障描述', label_col='故障标志')
	print('labels:', labels.dtype)

	# Character-level tokenizer fitted on the corpus; encode the raw text column
	# into padded token-id sequences.
	tokenizer = PaddingTokenizer(texts=texts, cut_type='char')
	X = tokenizer.batch_encode(df['故障描述'].to_list())
	model = TextCNNClassifier(100, vocab_size=tokenizer.vocab_size, layer_norm=False, num_classes=len(classes))

	X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2, random_state=42)

	# --- Variant 1: pre-split numpy arrays fed straight to the simple wrapper ---
	wrapper = SimpleClassifyModelWrapper(model, classes=classes)
	wrapper.train(X_train, y_train.astype(np.int64), val_data=(X_test, y_test.astype(np.int64)),
			   show_progress=False, early_stopping_rounds=3, T_max=3, monitor='val_loss')

	# --- Variant 2: Dataset-based training with a padding collator and bf16 AMP ---
	# Build the collator once and reuse it (it was previously re-constructed
	# for every train/evaluate call).
	collate_fn = PaddingTokenCollator(tokenizer.pad)
	train_set = TokenDataset(X_train, y_train)
	test_set = TokenDataset(X_test, y_test)
	wrapper2 = ClassifyModelWrapper(model, classes=classes)
	wrapper2.train(train_set, val_set=test_set, show_progress=True, early_stopping_rounds=3,
							collate_fn=collate_fn, amp=True, amp_dtype=torch.bfloat16)

	wrapper2.evaluate(test_set, collate_fn=collate_fn)
	print('=' * 100)
	# NOTE(review): this invokes ClassifyModelWrapper.evaluate unbound, passing
	# the SimpleClassifyModelWrapper instance `wrapper` as `self`. Confirm this
	# cross-class call is intentional and not a typo for `wrapper2`.
	ClassifyModelWrapper.evaluate(wrapper, test_set, collate_fn=collate_fn)