"""
pip install tensorflow==2.15.0
"""

# import os
# os.environ["KERAS_BACKEND"] = "torch"
from typing import Optional

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from nlpx.text_token import PaddingTokenizer
from nlpx.text_token.utils import convert_labels, get_text_length

from keras.models import Model, Sequential
from keras.layers import Layer, GRU, Embedding, Conv1D, GlobalMaxPooling1D, Dense, Input, concatenate, MaxPooling1D, GlobalAveragePooling1D
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical

# from tensorflow.python.keras.model import Model
# from tensorflow.python.keras.layers import GRU, Embedding, Conv1D, GlobalMaxPooling1D, Dense, Input, concatenate, MaxPooling1D, GlobalAveragePooling1D
# from tensorflow.python.keras.callbacks import EarlyStopping
# from tensorflow.python.keras.utils.np_utils import to_categorical

from keras_model_hub import TextCNN, TextCNNModel, TextCNNLayer

def get_text_labels(df: pd.DataFrame, text_col: str, label_col: str,
                    min_length: Optional[int] = None, max_length: Optional[int] = None):
	"""Extract texts and encoded labels from *df*, optionally filtering rows by text length.

	Args:
		df: source DataFrame.
		text_col: name of the column holding the raw texts.
		label_col: name of the column holding the raw labels.
		min_length: keep only rows whose text length >= this value (skipped if None/0).
		max_length: keep only rows whose text length <= this value (skipped if None/0).

	Returns:
		(texts, labels, classes): the text values, the labels encoded by
		``convert_labels``, and the class list it produces.

	Note: the original wrote a ``'length'`` column into the caller's DataFrame;
	this version keeps the lengths in a local mask and leaves *df* untouched.
	(Also fixes a ``NameError``: ``Optional`` was used here without being imported.)
	"""
	if min_length or max_length:
		lengths = df[text_col].apply(get_text_length)
		mask = pd.Series(True, index=df.index)
		if min_length:
			mask &= lengths >= min_length
		if max_length:
			mask &= lengths <= max_length
		df = df[mask]

	_labels, _classes = convert_labels(df[label_col])
	_texts = df[text_col].values
	return _texts, _labels, _classes


def build_text_cnn(embed_dim: int, vocab_size: int, num_classes: int, kernel_sizes=(2, 3, 4), activation='softmax'):
	"""Functional-API TextCNN: embedding -> parallel Conv1D branches
	-> global max pooling per branch -> concatenation -> dense classifier.

	Args:
		embed_dim: embedding dimension.
		vocab_size: vocabulary size for the Embedding layer.
		num_classes: number of output classes.
		kernel_sizes: one Conv1D branch per kernel size.
		activation: activation of the final Dense layer.
	"""
	token_ids = Input(shape=(None,))
	embedded = Embedding(vocab_size, embed_dim, input_length=None, mask_zero=False)(token_ids)
	branches = []
	for k in kernel_sizes:
		features = Conv1D(filters=20, kernel_size=k, strides=1, padding='same')(embedded)
		branches.append(GlobalMaxPooling1D()(features))
	merged = concatenate(branches, axis=-1)
	predictions = Dense(num_classes, activation=activation)(merged)
	return Model(inputs=token_ids, outputs=predictions)

def build_text_cnn2(embed_dim: int, vocab_size: int, num_classes: int, kernel_sizes=(2, 3, 4), activation='softmax'):
	"""TextCNN classifier that delegates the conv/pool/dense stack to the
	reusable ``TextCNN`` layer from ``keras_model_hub`` (20 filters per branch)."""
	token_ids = Input(shape=(None,))
	embedded = Embedding(vocab_size, embed_dim, input_length=None, mask_zero=False)(token_ids)
	predictions = TextCNN(num_classes, 20, kernel_sizes, activation=activation)(embedded)
	return Model(inputs=token_ids, outputs=predictions)


def build_text_cnn3(embed_dim: int, vocab_size: int, num_classes: int, kernel_sizes=(2, 3, 4), activation='softmax'):
	"""Sequential variant of the TextCNN classifier: Embedding followed by the
	``TextCNN`` layer from ``keras_model_hub``."""
	model = Sequential()
	model.add(Embedding(vocab_size, embed_dim, input_length=None, mask_zero=False))
	model.add(TextCNN(num_classes, 20, kernel_sizes, activation=activation))
	return model

def build_text_cnn_rnn(embed_dim: int, vocab_size: int, num_classes: int, kernel_sizes=(2, 3, 4), activation='softmax'):
	"""CNN+GRU hybrid: parallel Conv1D branches with local max pooling,
	concatenated, run through a GRU, then global max pooling and a dense head."""
	token_ids = Input(shape=(None,))
	embedded = Embedding(vocab_size, embed_dim, input_length=None, mask_zero=False)(token_ids)
	pooled_branches = []
	for k in kernel_sizes:
		features = Conv1D(filters=20, kernel_size=k, strides=1, padding='same')(embedded)
		pooled_branches.append(MaxPooling1D()(features))
	merged = concatenate(pooled_branches, axis=-1)
	# sequence = GRU(units=64, return_sequences=False)(merged)  # no pooling needed then, but performed poorly
	sequence = GRU(units=64, return_sequences=True)(merged)
	pooled = GlobalMaxPooling1D()(sequence)
	# pooled = GlobalAveragePooling1D()(sequence)
	predictions = Dense(num_classes, activation=activation)(pooled)
	return Model(inputs=token_ids, outputs=predictions)

def build_text_cnn_rnn2(embed_dim: int, vocab_size: int, num_classes: int, kernel_sizes=(2, 3, 4), activation='softmax'):
	"""CNN+GRU hybrid using the ``TextCNNLayer`` from ``keras_model_hub`` for the
	convolutional branches, followed by GRU, global max pooling and a dense head."""
	token_ids = Input(shape=(None,))
	embedded = Embedding(vocab_size, embed_dim, input_length=None, mask_zero=False)(token_ids)
	conv_features = TextCNNLayer(20, kernel_sizes)(embedded)
	# sequence = GRU(units=64, return_sequences=False)(conv_features)  # no pooling needed then, but performed poorly
	sequence = GRU(units=64, return_sequences=True)(conv_features)
	pooled = GlobalMaxPooling1D()(sequence)
	# pooled = GlobalAveragePooling1D()(sequence)
	predictions = Dense(num_classes, activation=activation)(pooled)
	return Model(inputs=token_ids, outputs=predictions)

if __name__ == '__main__':
	# Labelled incident CSV (GBK-encoded Chinese aviation safety events).
	file = '~/project/python/parttime/归档/text_gcn/data/北方地区不安全事件统计20240331.csv'
	data = pd.read_csv(file, encoding='GBK')
	# '故障描述' = fault description (text), '故障标志' = fault flag (label).
	texts, labels, classes = get_text_labels(data, text_col='故障描述', label_col='故障标志')
	del data
	
	# NOTE(review): range(1) only yields '0' — was range(10) (all digits) intended? Confirm.
	stop_words = [str(i) for i in range(1)]
	stop_words.extend(['-', '的', '一', '是', '了', '年', '月', '日'])
	# Character-level tokenizer; words with frequency < 5 are dropped.
	tokenizer = PaddingTokenizer(texts=texts, min_freq=5, cut_type='char', stop_words=stop_words, word_freq=False)
	ids = tokenizer.batch_encode(texts)
	X = np.array(ids)
	y = np.array(labels)  # [int]
	# The following two lines are equivalent and optional:
	# y = np.expand_dims(y, axis=1)
	# y = y.reshape(-1, 1)
	
	y = to_categorical(y, len(classes))  # one-hot labels: pair with 'categorical_crossentropy' loss
	
	X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
	
	# Stop training when validation loss hasn't improved for 3 epochs.
	early_stopping = EarlyStopping(monitor='val_loss', patience=3)
	# model = TextCNNModel(len(classes))
	model = build_text_cnn_rnn2(128, tokenizer.vocab_size, len(classes))
	print(model.summary())
	model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
	# Use this pairing instead when y stays as integer labels:
	# model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
	model.fit(X_train, y_train, batch_size=32, epochs=50, validation_data=(X_test, y_test), callbacks=early_stopping)
	
	# Two held-out sample descriptions for a smoke-test prediction.
	test_texts = ['昆明航B737飞机执行昆明至西安航班 西安机场进近过程中  机组将飞机襟翼位置设置错误触发近地警告事件',
	              '海航 B737飞机执行汉中至广州航班 飞机起飞后发现汉中过站期间货舱有730KG货物未卸机']
	
	test_inputs = tokenizer.batch_encode(test_texts)
	test_inputs = np.array(test_inputs)
	test_out = model.predict(test_inputs)
	# argmax over the softmax outputs -> predicted class index per sample.
	test_index = np.argmax(test_out, axis=-1)
	
	print('labels', [classes[i] for i in test_index])
	print('probs', [test_out[idx][i] for idx, i in enumerate(test_index)])
	