import torch
import pandas as pd
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from sklearn.model_selection import train_test_split
from nlpx.dataset import TextVecCollator, TokenizeCollator, TextDataset, TextDFDataset
from nlpx.llm import AlbertTokenizeVec, ErnieTokenizeVec, BertTokenizeVec
from nlpx.text_token import TokenEmbedding
from nlpx.model import TextCNN
from nlpx.model.wrapper import ClassifyModelWrapper

# Local directory of a pretrained ALBERT (small, Chinese) model — consumed by
# AlbertTokenizeVec below. NOTE(review): machine-specific absolute path.
pretrained_path = r'/Users/summy/data/albert_small_zh'
# Pretrained static word vectors (sgns.weibo.word, bz2-compressed) — currently
# only referenced from commented-out TokenEmbedding experiments.
pretrained_path2 = r'/Users/summy/data/shizhan/sgns.weibo.word.bz2'


if __name__ == '__main__':
	# --- Data loading & label encoding -------------------------------------
	# Incident dataset is a GBK-encoded Chinese CSV. The label column
	# ('故障标志') is converted to a pandas categorical so that:
	#   * `classes` holds the human-readable category names (for reporting), and
	#   * the column itself is replaced by integer codes (for training).
	df = pd.read_csv('~/project/python/parttime/text_gcn/data/北方地区不安全事件统计20240331.csv', encoding='GBK')
	df['故障标志'] = df['故障标志'].astype('category')
	classes = df['故障标志'].cat.categories.tolist()
	df['故障标志'] = df['故障标志'].cat.codes

	# --- Text vectorization -------------------------------------------------
	# Pretrained ALBERT model; presumably tokenizes raw text and emits hidden
	# states consumed by TextCNN via TextVecCollator — confirm against nlpx docs.
	tokenize_vec = AlbertTokenizeVec(pretrained_path)

	# --- Train/test split & datasets ---------------------------------------
	# 80/20 split over the (text, label) columns.
	# NOTE(review): no random_state is passed, so the split (and therefore the
	# reported metrics) differs between runs — set one for reproducibility.
	train_data, test_data = train_test_split(df[['故障描述', '故障标志']], test_size=0.2)
	train_set = TextDFDataset(train_data)
	test_set = TextDFDataset(test_data)

	# --- Model & training ---------------------------------------------------
	# TextCNN classifier sized to ALBERT's hidden dimension, one output per
	# class; ClassifyModelWrapper drives the training loop with early stopping
	# after 1 round without improvement.
	text_cnn = TextCNN(tokenize_vec.hidden_size, out_features=len(classes))
	model = ClassifyModelWrapper(classes=classes)
	model.train(text_cnn, train_set, test_set, early_stopping_rounds=1, collate_fn=TextVecCollator(tokenize_vec))

	# --- Smoke-test prediction ----------------------------------------------
	# Encode two example incident descriptions and print the predicted class
	# probabilities.
	test_texts = ['昆明航B737飞机执行昆明至西安航班 西安机场进近过程中  机组将飞机襟翼位置设置错误触发近地警告事件',
				  '海航 B737飞机执行汉中至广州航班 飞机起飞后发现汉中过站期间货舱有730KG货物未卸机']
	input_ids = tokenize_vec.encode_plus(test_texts)
	print(model.predict_classes_proba(input_ids))
