import gc
import torch
import pandas as pd
from sklearn.model_selection import train_test_split
from nlpx.dataset import TokenizeCollator, TextDFDataset,TextDataset, TokenDataset, SameLengthTokenDataset, PaddingTokenCollator, TextVecCollator
from nlpx.text_token import TokenEmbedding, Tokenizer
from nlpx.text_token.utils import convert_labels, get_text_length
from nlpx.text_token.stats import show_token_freq_plot, show_sentence_len_hist
from nlpx.model import TextCNN
from nlpx.model.wrapper import ClassifyModelWrapper
from nlpx.model.classifier import TextCNNClassifier, RNNAttentionClassifier
from sklearn.model_selection import train_test_split
from nlpx.llm import AlbertTokenizeVec, ErnieTokenizeVec, train_test_set


# Machine-specific data paths — adjust per environment.
# Directory holding the pretrained ALBERT (small, Chinese) model weights.
pretrained_path = r'/Users/summy/data/albert_small_zh'
# Compressed pretrained word-embedding file (SGNS Weibo vectors).
pretrained_path2 = r'/Users/summy/data/shizhan/sgns.weibo.word.bz2'


def tokenize_test(texts, labels, classes):
	"""Train an attention RNN classifier on vocabulary-tokenized texts.

	Builds a Tokenizer from the corpus (min_freq=20), encodes the texts
	without padding, makes a 90/10 train/test split of TokenDatasets, and
	fits an RNNAttentionClassifier through ClassifyModelWrapper, padding
	each batch to at most 25 tokens at collate time.
	"""
	# Alternative vocabulary sources, kept for reference:
	# tokenizer = TokenEmbedding(pretrained_path2)
	# tokenizer.save("weibo.vocab.txt")
	# tokenizer = Tokenizer.load("weibo.vocab.txt")
	tokenizer = Tokenizer(corpus=texts, min_freq=20)
	print('Tokenizer loaded')

	encoded = tokenizer.batch_encode(texts, padding=False)
	# Drop the raw texts as soon as they are encoded to reduce peak memory.
	del texts
	gc.collect()
	print('batch_encode')

	X_train, X_test, y_train, y_test = train_test_split(encoded, labels, test_size=0.1)
	train_set = TokenDataset(X_train, y_train)
	test_set = TokenDataset(X_test, y_test)

	embed_dim = 100
	# classifier = TextCNNClassifier(embed_dim, len(classes), num_embeddings=tokenizer.vocab_size)
	classifier = RNNAttentionClassifier(
		embed_dim, len(classes), num_heads=4, num_embeddings=tokenizer.vocab_size)

	wrapper = ClassifyModelWrapper(classes=classes)
	wrapper.train(
		classifier, train_set, test_set, early_stopping_rounds=4,
		collate_fn=PaddingTokenCollator(tokenizer.pad, max_length=25))


def bert_test(texts, labels, classes):
	"""Vectorize texts with ALBERT, train a TextCNN, and dump predictions.

	Makes a 90/10 split, embeds both splits eagerly with AlbertTokenizeVec,
	trains via ClassifyModelWrapper (early stopping after 2 rounds), then
	writes class/probability predictions for the held-out CSV to results.csv.
	"""
	X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=0.1)

	vectorizer = AlbertTokenizeVec(pretrained_path)
	train_set, test_set = train_test_set(vectorizer, X_train, X_test, y_train, y_test)

	net = TextCNN(vectorizer.hidden_size, out_features=len(classes))
	wrapper = ClassifyModelWrapper(classes=classes)
	wrapper.train(net, train_set, test_set, early_stopping_rounds=2)

	# Score the unlabeled test file and persist (class, probability) rows.
	unseen = pd.read_csv('~/data/shizhan/test.csv')['sentence'].values
	predicted = wrapper.predict_classes_proba(vectorizer(unseen))

	output = pd.DataFrame({"classes": predicted[0], "proba": predicted[1]})
	output.to_csv('results.csv', header=False)


def bert_test2(texts, labels, classes):
	"""Like bert_test, but vectorizes lazily per batch via TextVecCollator.

	Texts stay raw in TextDataset objects; AlbertTokenizeVec embeds each
	batch at collate time (max length 25). Trains a TextCNN with early
	stopping after 1 round. No predictions are written.
	"""
	X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=0.1)
	vectorizer = AlbertTokenizeVec(pretrained_path)

	train_set = TextDataset(X_train, y_train)
	test_set = TextDataset(X_test, y_test)

	net = TextCNN(vectorizer.hidden_size, out_features=len(classes))
	wrapper = ClassifyModelWrapper(classes=classes)
	wrapper.train(
		net, train_set, test_set, early_stopping_rounds=1,
		collate_fn=TextVecCollator(vectorizer, 25))


if __name__ == '__main__':
	# Load the training CSV, keep only the sentence/label columns, and
	# drop sentences that are effectively empty (length <= 1).
	df = pd.read_csv('~/data/shizhan/train.csv')
	df.drop(columns=['id', 'label_desc'], inplace=True)
	# Pass the callable directly — no need for a lambda wrapper.
	df['length'] = df['sentence'].apply(get_text_length)
	df = df[df['length'] > 1]

	# convert_labels maps raw labels to integer codes and returns the
	# ordered class list alongside them.
	labels, classes = convert_labels(df['label'])
	texts = df['sentence'].values
	# Release the DataFrame before the memory-hungry training step.
	del df
	gc.collect()

	tokenize_test(texts, labels, classes)
	# Alternative experiments / diagnostics:
	# bert_test(texts, labels, classes)
	# bert_test2(texts, labels, classes)
	# show_sentence_len_hist(texts)
	# show_token_freq_plot(texts)
