import torch
from torch.utils.data import DataLoader
from data_loader import load_data, collate_fn
from model import LSTMClassifier
from train import train_model

"""Script body: train an LSTM news-title classifier and save the best model."""

# Paths for the corpus, the pretrained embeddings, and the checkpoint output.
DATA_FILE = "cleaned_news_titles.txt"
EMBEDDING_PATH = "embedding_SougouNews.npz"
SAVE_PATH = "best_lstm_model.pth"

# Model / training hyperparameters.
EMBED_SIZE = 300       # dimensionality of the word vectors
HIDDEN_SIZE = 128      # LSTM hidden-state width
NUM_LAYERS = 4         # stacked LSTM layers
DROPOUT = 0.5
BATCH_SIZE = 64
NUM_EPOCHS = 50
LEARNING_RATE = 0.001
# NOTE(review): the class count is hard-coded here even though the original
# comments claimed it was derived from the training data — confirm that 15
# matches the actual number of label classes in DATA_FILE.
OUTPUT_SIZE = 15

# Load the dataset splits, the vocabulary, and the pretrained embedding matrix.
train_dataset, test_dataset, word_to_idx, embedding_matrix = load_data(
    DATA_FILE, EMBEDDING_PATH, fixed_length=30
)

# Wrap the splits in batched loaders; only the training split is shuffled.
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn)

# Pick the compute device, then build the model, loss, and optimizer on it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"current device: {device}")
model = LSTMClassifier(
    len(word_to_idx), EMBED_SIZE, HIDDEN_SIZE, OUTPUT_SIZE, NUM_LAYERS, DROPOUT, embedding_matrix
).to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

# Run the training loop; the best checkpoint is written to SAVE_PATH.
train_model(model, train_loader, test_loader, criterion, optimizer, NUM_EPOCHS, device, SAVE_PATH)
