from datetime import datetime
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from gensim.models import Word2Vec
from flask import Flask, jsonify

# Flask application instance; the route below registers against it.
app = Flask(__name__)


# Feed-forward neural network used as the user-category classifier.
class CategoryModel(nn.Module):
    """Two-layer MLP: Linear -> ReLU -> Linear.

    Maps a feature vector of size ``input_dim`` to raw class scores
    (logits) of size ``output_dim``; no softmax is applied here, so the
    output pairs with ``nn.CrossEntropyLoss``.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Return logits for the input batch ``x``."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)


@app.route('/userCategoryTrain', methods=['POST'])
def train_model():
    """Train the user-category classifier from CSV data.

    Reads ``train_user.csv`` / ``test_user.csv`` from the working
    directory, trains a Word2Vec embedding over all descriptions,
    averages word vectors into one feature vector per user, then fits
    ``CategoryModel`` with full-batch Adam and saves the Word2Vec model,
    the classifier weights, and a per-epoch loss log.

    Returns:
        JSON success message.
    """
    # Load the CSV files.
    train_df = pd.read_csv('train_user.csv')
    test_df = pd.read_csv('test_user.csv')

    # Tokenize the description column by whitespace.
    # NOTE(review): a NaN description would crash here (float has no
    # .split) — confirm upstream data always populates this column.
    train_tokens = train_df['userDescription'].apply(lambda x: x.split())
    test_tokens = test_df['userDescription'].apply(lambda x: x.split())

    # Train Word2Vec on the combined corpus so the embedding covers
    # vocabulary from both splits. (Named `w2v`, not `model`, to avoid
    # shadowing the PyTorch classifier created below.)
    corpus = pd.concat([train_tokens, test_tokens])
    w2v = Word2Vec(corpus, vector_size=100, window=5, min_count=1, workers=4)
    w2v.save("word2vec.model")

    def get_feature_vector(words, w2v_model):
        """Average the word vectors of ``words``.

        Falls back to a zero vector when no word is in the vocabulary,
        instead of the NaN that ``np.mean`` would produce on an empty
        list (which would later break ``np.stack`` and training).
        """
        vectors = [w2v_model.wv[w] for w in words if w in w2v_model.wv]
        if not vectors:
            return np.zeros(w2v_model.vector_size, dtype=np.float32)
        return np.mean(vectors, axis=0)

    # The test split is only needed above to widen the Word2Vec
    # vocabulary; this endpoint performs no evaluation, so test feature
    # vectors/tensors are not built.
    train_vectors = train_tokens.apply(lambda x: get_feature_vector(x, w2v))

    # Hyper-parameters.
    input_dim = w2v.vector_size  # dimension of the averaged Word2Vec vectors
    hidden_dim = 64
    output_dim = train_df['category'].nunique()  # number of classes
    num_epochs = 20
    learning_rate = 0.001

    # Prepare training tensors.
    # NOTE(review): 'category' is assumed to already hold integer class
    # ids in [0, output_dim) — CrossEntropyLoss requires that; confirm
    # the labels are not raw strings or sparse ids.
    train_X = torch.tensor(np.stack(train_vectors.values), dtype=torch.float32)
    train_y = torch.tensor(train_df['category'].values, dtype=torch.long)

    # Build the classifier, loss, and optimizer.
    model = CategoryModel(input_dim, hidden_dim, output_dim)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Per-epoch loss records for the CSV log.
    training_records = []

    # Full-batch training loop.
    model.train()
    for epoch in range(num_epochs):
        outputs = model(train_X)
        loss = criterion(outputs, train_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        training_records.append({'Epoch': epoch + 1, 'Loss': loss.item()})
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

    # Persist the loss curve and the trained weights.
    pd.DataFrame(training_records).to_csv('final_train_record.csv', index=False)
    torch.save(model.state_dict(), 'categoryModel.pth')

    return jsonify({'message': 'Model trained and saved successfully!'})


if __name__ == '__main__':
    # Dev server; 0.0.0.0 binds all interfaces so the endpoint is
    # reachable from outside the host/container. Not for production.
    app.run(host='0.0.0.0', port=5000)
