import pandas as pd
import torch
from torch import nn
from sklearn.model_selection import train_test_split
from nlpx.dataset import TokenizeCollator, TextDFDataset, TokenDataset, SameLengthTokenDataset, PaddingTokenCollator
from nlpx.text_token import TokenEmbedding, Tokenizer
from nlpx.model import TextCNN, RNNAttention
from nlpx.model.wrapper import ClassifyModelWrapper
from nlpx.model.classifier import EmbeddingClassifier, TextCNNClassifier, RNNAttentionClassifier


# Path to pretrained Chinese word vectors (Weibo word2vec, bzip2-compressed),
# consumed by the commented-out TokenEmbedding path in __main__.
# NOTE(review): machine-specific absolute path — should be made configurable.
pretrained_path2 = r'/Users/summy/data/shizhan/sgns.weibo.word.bz2'


class Classifier(nn.Module):
	"""Text classifier: token-id embedding followed by an RNN-attention head.

	Args:
		embed_dim: Dimensionality of the token embeddings.
		num_classes: Number of output classes.
		embedding: Optional pre-built embedding layer (or callable mapping
			input_ids -> embeddings). When None, a fresh ``nn.Embedding`` is
			created from ``num_embeddings`` and ``embed_dim``.
		num_embeddings: Vocabulary size; required only when ``embedding`` is None.

	Raises:
		ValueError: If neither ``embedding`` nor ``num_embeddings`` is provided.
	"""

	def __init__(self, embed_dim: int, num_classes: int, embedding=None, num_embeddings: int = None):
		super().__init__()
		# Use an explicit ``is not None`` check instead of truthiness: a
		# tensor-backed embedding (e.g. pretrained weights from a tokenizer)
		# raises "Boolean value of Tensor is ambiguous" under ``bool()``.
		if embedding is not None:
			self.embedding = embedding
		else:
			if num_embeddings is None:
				raise ValueError('num_embeddings is required when embedding is not provided')
			self.embedding = nn.Embedding(num_embeddings, embed_dim)
		# self.classifier = TextCNN(embed_dim, out_features=num_classes)
		self.classifier = RNNAttention(embed_dim, num_heads=2, out_features=num_classes)

	def forward(self, input_ids, labels=None):
		"""Embed ``input_ids`` and delegate to the classifier head.

		Returns whatever the head returns — presumably logits, or a
		(loss, logits) pair when ``labels`` is given; confirm against
		RNNAttention's implementation.
		"""
		embedding = self.embedding(input_ids)
		return self.classifier(embedding, labels)


if __name__ == '__main__':
	# ---- Data loading -------------------------------------------------
	# Incident dataset: GBK-encoded CSV ("unsafe events in northern region,
	# stats 2024-03-31"). Machine-local path.
	df = pd.read_csv('~/project/python/parttime/text_gcn/data/北方地区不安全事件统计20240331.csv', encoding='GBK')
	# '故障标志' (fault flag) is the label column: convert to pandas categorical
	# codes, keeping the original category names for readable predictions.
	df['故障标志'] = df['故障标志'].astype('category')
	classes = df['故障标志'].cat.categories.tolist()
	df['故障标志'] = df['故障标志'].cat.codes
	# '故障描述' (fault description) is the free-text model input.
	texts = df['故障描述'].values
	labels = df['故障标志']

	# ---- Tokenizer ----------------------------------------------------
	# Active path: build vocabulary from the corpus itself (words seen >= 10x).
	# Alternatives below load pretrained Weibo embeddings or a saved vocab.
	# tokenizer = TokenEmbedding(pretrained_path2)
	# tokenizer.save("weibo.vocab.txt")
	tokenizer = Tokenizer(corpus=texts, min_freq=10)
	# tokenizer = Tokenizer.load("weibo.vocab.txt")
	print('Tokenizer loaded')

	# ---- Alternative dataset constructions (kept for reference) -------
	# train_data, test_data = train_test_split(df[['故障描述', '故障标志']], test_size=0.2)
	# train_set = TextDFDataset(train_data)
	# test_set = TextDFDataset(test_data)

	# tokens = tokenizer.batch_encode(df['故障描述'])
	# X_train, X_test, y_train, y_test = train_test_split(tokens, df['故障标志'], test_size=0.2)
	# train_set = SameLengthTokenDataset(X_train, y_train)
	# test_set = SameLengthTokenDataset(X_test, y_test)

	# import jieba
	# import logging
	# jieba.setLogLevel(logging.INFO)
	#
	# for word in tokenizer.vocab[len(tokenizer.reserved_token):]:
	# 	jieba.add_word()

	# ---- Active dataset construction ----------------------------------
	# Encode without padding here; batches are padded later by
	# PaddingTokenCollator at DataLoader time (dynamic per-batch padding).
	tokens = tokenizer.batch_encode(texts, padding=False)
	print('batch_encode')
	X_train, X_test, y_train, y_test = train_test_split(tokens, labels, test_size=0.2)
	train_set = TokenDataset(X_train, y_train)
	test_set = TokenDataset(X_test, y_test)

	# ---- Model --------------------------------------------------------
	embed_dim = 100
	# classifier = Classifier(tokenizer.embed_dim, len(classes), tokenizer.embedding)
	classifier = TextCNNClassifier(embed_dim, len(classes), num_embeddings=tokenizer.vocab_size)
	# classifier = RNNAttentionClassifier(embed_dim, len(classes), num_embeddings=tokenizer.vocab_size)

	# classifier = TextCNN(embed_dim, out_features=len(classes))
	# classifier = RNNAttention(embed_dim, num_heads=1, out_features=len(classes))
	# classifier = EmbeddingClassifier(classifier, embedding=tokenizer.embedding)
	# classifier = EmbeddingClassifier(classifier, num_embeddings=tokenizer.vocab_size, embed_dim=embed_dim)

	# ---- Training -----------------------------------------------------
	# Wrapper handles the train loop; stops after 10 rounds without improvement.
	model = ClassifyModelWrapper(classes=classes)
	# model.train(classifier, train_set, test_set, early_stopping_rounds=2)
	# model.train(classifier, train_set, test_set, early_stopping_rounds=2, collate_fn=TokenizeCollator(tokenizer))
	model.train(classifier, train_set, test_set, early_stopping_rounds=10,
	            collate_fn=PaddingTokenCollator(tokenizer.pad))

	# ---- Sanity-check inference ---------------------------------------
	# Two example aviation-incident descriptions.
	test_texts = ['昆明航B737飞机执行昆明至西安航班 西安机场进近过程中  机组将飞机襟翼位置设置错误触发近地警告事件',
				  '海航 B737飞机执行汉中至广州航班 飞机起飞后发现汉中过站期间货舱有730KG货物未卸机']
	# NOTE(review): this batch_encode call uses default padding (unlike the
	# padding=False training call) — presumably pads to equal length so the
	# rectangular torch.tensor construction below succeeds; confirm.
	inputs = tokenizer.batch_encode(test_texts)
	print(model.predict_classes_proba(torch.tensor(inputs, dtype=torch.long)))
