from data import data_preprocessing
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense
from sklearn.model_selection import train_test_split
import tensorflow as tf


# Load and preprocess the training CSV via the project-local helper.
# Per how the results are used below:
#   title      - padded integer sequences (title.shape[1] is the sequence length)
#   word_index - tokenizer vocabulary map (its size drives Embedding input_dim)
#   label      - one-hot encoded labels (len(label[0]) is the class count)
# NOTE(review): exact return types depend on data.data_preprocessing — confirm there.
data_path = r"train.news.csv"
title, word_index, label= data_preprocessing(data_path)

# 构建RNN模型
# Build the RNN classifier: Embedding -> LSTM -> softmax classification head.
vocab_size = len(word_index) + 1   # +1 so index 0 (padding/OOV) has a row
seq_len = title.shape[1]           # padded title length from preprocessing
num_classes = len(label[0])        # width of the one-hot label vectors

model = Sequential([
    Embedding(input_dim=vocab_size, output_dim=100, input_length=seq_len),
    LSTM(128),
    Dense(num_classes, activation='softmax'),
])
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)

# 划分训练集和验证集
# Split 70% train, then halve the remainder into validation and test (15%/15%).
X_train, X_rest, y_train, y_rest = train_test_split(
    title, label, test_size=0.3, random_state=42,
)
X_val, X_test, y_val, y_test = train_test_split(
    X_rest, y_rest, test_size=0.5, random_state=42,
)

# Wrap train/validation arrays as batched tf.data pipelines.
BATCH_SIZE = 64
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(BATCH_SIZE)
val_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val)).batch(BATCH_SIZE)

epochs = 50

# Manual training loop over the batched pipeline.
# NOTE: train_on_batch returns the metrics list ([loss, accuracy] given the
# compile() settings), NOT a Keras History object — the original bound it to a
# misleading `history` name and discarded it. We accumulate the loss so the
# 50-epoch run actually reports progress.
for epoch in range(epochs):
    epoch_loss = 0.0
    num_batches = 0
    for batch_data, batch_labels in train_dataset:
        metrics = model.train_on_batch(batch_data, batch_labels)
        epoch_loss += float(metrics[0])  # metrics[0] is the batch loss
        num_batches += 1
    avg_loss = epoch_loss / max(num_batches, 1)  # guard against an empty dataset
    print(f'Epoch {epoch + 1}/{epochs} - loss: {avg_loss:.4f}')
    # Checkpoint after every epoch; the constant path keeps only the latest.
    model.save('my_news_classification_model_tf.h5')

print("模型评估")

# evaluate() returns metrics in compile() order: [loss, accuracy].
eval_results = model.evaluate(val_dataset)
val_loss, val_accuracy = eval_results
print(f'Validation loss: {val_loss}, Validation accuracy: {val_accuracy}')

print("保存模型")

# Persist the trained model (architecture + weights) in HDF5 format.
model.save('news_classification_model_tf.h5')

# ------test-----------------
# Evaluate on the held-out test split produced by the earlier train_test_split.
# NOTE(review): the original code ended with a dangling no-op expression
# (`title,word_index`) — apparently an unfinished attempt to re-run
# data_preprocessing on a separate test CSV. Re-tokenizing a different file
# would also build a word_index inconsistent with the trained Embedding, so we
# evaluate on X_test/y_test, which share the training vocabulary.
test_data_path = 'test.news.csv'  # kept for reference; unused by this evaluation

test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(64)
test_loss, test_accuracy = model.evaluate(test_dataset)
print(f'Test loss: {test_loss}, Test accuracy: {test_accuracy}')
