import pandas as pd
import torch
from datetime import datetime
from torch import nn
from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR
from nlpx.tokenize import Tokenizer, PaddingTokenizer, utils
from sklearn.model_selection import train_test_split
from nlpx.dataset import TokenizeCollator, TextDFDataset, TokenDataset, SameLengthTokenDataset, PaddingTokenCollator
from nlpx.model import RNNAttention, TextCNN
from nlpx.model.wrapper import ModelWrapper, ClassifyModelWrapper
from nlpx.model.embedding import CNNEmbedding
from nlpx.model.classifier import CNNRNNAttentionClassifier, RNNAttentionClassifier, \
	RNNCNNAttentionClassifier, ResRNNCNNAttentionClassifier, TextCNNClassifier, RotaryAttentionClassifier
from nlpx.tokenize.utils import convert_labels, get_text_length, get_df_text_labels
	
	
# def get_text_labels(df: pd.DataFrame, text_col: str, label_col: str, min_length: int = None, max_length: Optional[int] = None):
# 	if min_length or max_length:
# 		df['length'] = df[text_col].apply(lambda x: utils.get_text_length(x))
# 	if min_length:
# 		df = df[df['length'] >= min_length]
# 	if max_length:
# 		df = df[df['length'] <= max_length]
		
# 	_labels, _classes = utils.convert_labels(df[label_col])
# 	_texts = df[text_col].values
# 	return _texts, _labels, _classes
	
	
# Entry-point experiment script: end-to-end text classification with the
# project-local `nlpx` library.
# Pipeline: load a labelled CSV -> char-level tokenize -> train/test split ->
# train/evaluate two classifier variants (RNNAttention, TextCNN) and print
# predictions on two sample texts. A third variant (RotaryAttention) is kept
# commented out for reference.
if __name__ == '__main__':
	embed_dim = 128  # shared embedding dimension for all classifiers below
	# data = pd.read_csv('~/data/spamham.csv')
	# texts, labels, classes = get_text_labels(data, text_col='Message', label_col='Category', min_length=4)
	
	# Load the GBK-encoded incident-report dataset; text column is the fault
	# description, label column is the fault flag.
	file = '~/project/python/parttime/归档/text_gcn/data/北方地区不安全事件统计20240331.csv'
	data = pd.read_csv(file, encoding='GBK')
	texts, labels, classes = get_df_text_labels(data, text_col='故障描述', label_col='故障标志')
	
	# data = pd.read_csv('~/data/shizhan/train.csv')
	# data.drop(columns=['id', 'label'], inplace=True)
	# data['length'] = data['sentence'].apply(lambda x: get_text_length(x))
	# data = data[data['length'] > 15]
	# texts, labels, classes = handle_text(data.sample(2000), text_col='sentence', label_col='label_desc')
	# del data
	
	# NOTE(review): range(1) only yields the single digit '0'; if the intent
	# was to stop-word ALL digits this should be range(10) — confirm.
	stop_words = [str(i) for i in range(1)]
	# stop_words.extend(['a', 'the', 'is'])
	stop_words.extend(['-', '的', '一', '是', '了', '年', '月', '日'])
	# tokenizer = Tokenizer(corpus=texts, min_freq=10, lang='en', stop_words=stop_words)
	# tokenizer = PaddingTokenizer(texts=texts, min_freq=5, cut_type='word', stop_words=stop_words, word_freq=False)
	# Char-level tokenizer; tokens seen fewer than 5 times are dropped.
	tokenizer = PaddingTokenizer.from_texts(texts, min_freq=5, cut_type='char', stop_words=stop_words, word_freq=False)
	# tokenizer.add_stop_words(['的'])
	# tokenizer.save()
	
	# tokenizer = Tokenizer.load()
	print(datetime.now())
	# padding=False: keep variable-length id sequences here; per-batch padding
	# is done later by PaddingTokenCollator.
	ids = tokenizer.batch_encode(texts, padding=False)
	print('batch_encode', datetime.now())
	# NOTE(review): no random_state/stratify, so the split (and results)
	# differ on every run — confirm that is intended for this experiment.
	X_train, X_test, y_train, y_test = train_test_split(ids, labels, test_size=0.2)
	train_set = TokenDataset(X_train, y_train)
	del X_train, y_train  # free the raw splits once wrapped in the dataset
	test_set = TokenDataset(X_test, y_test)
	# del X_test, y_test
	
	# Two held-out sample sentences (aviation incident reports) used as a
	# smoke test for the trained classifiers' predictions below.
	test_texts = ['昆明航B737飞机执行昆明至西安航班 西安机场进近过程中  机组将飞机襟翼位置设置错误触发近地警告事件',
	              '海航 B737飞机执行汉中至广州航班 飞机起飞后发现汉中过站期间货舱有730KG货物未卸机']
	# test_texts = ['Go until jurong point, crazy.. Available only in bugis n great world la e buffet... Cine there got amore wat...',
	#               'WINNER!! As a valued network customer you have been selected to receivea £900 prize reward! To claim call 09061701461.']
	# Presumably batch_encode pads by default here, yielding a rectangular
	# list suitable for torch.tensor(...) below — verify against nlpx.
	inputs = tokenizer.batch_encode(test_texts)
	
	# The classifiers below have a higher performance ceiling than TextCNN.
	# classifier = TextCNNClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), activation=nn.LeakyReLU(inplace=True))
	# For English word-level tokenization, disabling residual may work better.
	classifier = RNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), padding_idx=tokenizer.PAD_ID)
	# classifier = CNNRNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), bidirectional=False)
	# classifier = RNNCNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), bidirectional=True)
	# classifier = ResRNNCNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes))

	optimizer = torch.optim.AdamW(classifier.parameters(), lr=0.01)
	# optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.5, dampening=0.5)
	# scheduler = StepLR(optimizer, step_size=10, gamma=0.01)
	scheduler = CosineAnnealingLR(optimizer, T_max=3)
	# model_wrapper = ModelWrapper(classifier)
	model_wrapper = ClassifyModelWrapper(classifier, classes=classes)
	# Train with early stopping on validation loss; the collator pads each
	# batch to its own max length and also returns sequence lengths.
	model_wrapper.train(train_set, test_set, early_stopping_rounds=5, num_workers=0,
	                    optimizer=optimizer, scheduler=scheduler, monitor='val_loss',
	                    collate_fn=PaddingTokenCollator(tokenizer.pad, return_sequence_length=True))

	print(model_wrapper.predict_proba(torch.tensor(inputs, dtype=torch.long)))
	print(model_wrapper.predict_classes_proba(torch.tensor(inputs, dtype=torch.long)))
	print(model_wrapper.evaluate(test_set, collate_fn=PaddingTokenCollator(tokenizer.pad, return_sequence_length=True)))

	# #################################################################
	# Second experiment: TextCNN baseline with SiLU activation, trained via
	# the wrapper's own train_evaluate (internal optimizer/scheduler, T_max=5).
	classifier2 = TextCNNClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), activation=nn.SiLU(inplace=True))
	# # For English word-level tokenization, disabling residual may work better.
	# # classifier2 = RNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), residual=False)
	# # classifier2 = CNNRNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), bidirectional=False)
	# # classifier2 = RNNCNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes), bidirectional=True)
	# # classifier3 = ResRNNCNNAttentionClassifier(embed_dim, vocab_size=tokenizer.vocab_size, num_classes=len(classes))

	model_wrapper2 = ClassifyModelWrapper(classifier2, classes=classes)
	# NOTE(review): return_sequence_length=False here (True above) — confirm
	# TextCNNClassifier does not consume sequence lengths.
	model_wrapper2.train_evaluate(train_set, test_set, early_stopping_rounds=5, T_max=5, show_progress=True,num_workers=0,
	                     collate_fn=PaddingTokenCollator(tokenizer.pad, return_sequence_length=False))
# 
	print(model_wrapper2.predict_classes_proba(torch.tensor(inputs, dtype=torch.long)))
	print(model_wrapper2.evaluate(test_set, collate_fn=PaddingTokenCollator(tokenizer.pad)))
	
	# # #################################################################
	# classifier3 = RotaryAttentionClassifier(embed_dim, 80, vocab_size=tokenizer.vocab_size, num_classes=len(classes), add_cnn=True)
	
	# model_wrapper3 = ClassifyModelWrapper(classifier3, classes=classes)
	# model_wrapper3.train(train_set, test_set, early_stopping_rounds=5, T_max=5, show_progress=False,num_workers=0,
	#                      collate_fn=PaddingTokenCollator(tokenizer.pad, return_sequence_length=False))
	
	# print(model_wrapper3.predict_classes_proba(torch.tensor(inputs, dtype=torch.long)))
	# print(model_wrapper3.evaluate(test_set, collate_fn=PaddingTokenCollator(tokenizer.pad)))
