import pandas as pd
from sklearn.model_selection import train_test_split
from nlpx.tokenize import utils, PaddingTokenizer
from nlpx.model.classifier import TextCNNClassifier
from nlpx.model.wrapper import TextModelWrapper, SplitTextModelWrapper, PaddingTextModelWrapper, SplitPaddingTextModelWrapper

FILE = '~/project/python/parttime/归档/text_gcn/data/北方地区不安全事件统计20240331.csv'


if __name__ == '__main__':
	# Demo: train a TextCNN classifier on a GBK-encoded Chinese incident CSV
	# and report held-out evaluation metrics.
	data = pd.read_csv(FILE, encoding='GBK')
	# Columns: '故障描述' = fault description (text), '故障标志' = fault label.
	texts, labels, classes = utils.get_df_text_labels(data, text_col='故障描述', label_col='故障标志')

	# Build the vocabulary, dropping rare tokens (min_freq=5) and stop words.
	stop_words = ['的', '了']
	tokenizer = PaddingTokenizer(texts=texts, min_freq=5, cut_type='word', stop_words=stop_words)

	# Fixed random_state so repeated runs evaluate on the same split.
	# NOTE(review): the tokenizer vocabulary above is built on the FULL corpus
	# before splitting, so test-set tokens leak into the vocab — acceptable for
	# a demo, but fit the tokenizer on X_train only for a leak-free evaluation.
	X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=0.2, random_state=42)

	# 64 = embedding dimension; vocab size comes from the fitted tokenizer.
	model = TextCNNClassifier(64, vocab_size=tokenizer.vocab_size, num_classes=len(classes))

	model_wrapper = TextModelWrapper(model, tokenizer, classes=classes)
	history = model_wrapper.train_evaluate(X_train, y_train, val_data=(X_test, y_test), batch_size=64)

	print(model_wrapper.evaluate(X_test, y_test))
	