import json
from datetime import datetime

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from confluent_kafka import Producer
from flask import Flask, jsonify
from gensim.models import Word2Vec
from sklearn.metrics import recall_score

app = Flask(__name__)

# Kafka configuration
KAFKA_BROKER = 'localhost:9092'
USER_CATEGORY_REF_DONE = 'user_category_ref_done'  # topic signalling a finished classification run

# Module-level Kafka producer instance, created once and reused by every request.
producer = Producer({'bootstrap.servers': KAFKA_BROKER})

# Deep neural network model definition
class CategoryModel(nn.Module):
    """Two-layer feed-forward classifier over description feature vectors.

    Architecture: Linear(input_dim -> hidden_dim) -> ReLU ->
    Linear(hidden_dim -> output_dim). The output is raw logits (no softmax),
    which is what the ``torch.max`` call at the prediction site expects.

    NOTE: the original file used ``nn`` without importing it
    (only ``import torch`` was present) — fixed at the import block.
    """

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int) -> None:
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return class logits of shape (batch, output_dim) for input of shape (batch, input_dim)."""
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

@app.route('/userCategoryRef', methods=['POST'])
def predict():
    """Classify users from 'toref_user.csv', persist results, and notify via Kafka.

    Pipeline: whitespace-tokenize each user description, embed it with the
    saved Word2Vec model, score with the saved CategoryModel, write the
    labelled CSV (working dir + desktop copy), then publish a JSON message
    carrying the weighted recall and a creation timestamp.

    Returns:
        flask JSON response confirming the run.
    """
    # Load the users awaiting classification.
    predict_df = pd.read_csv('toref_user.csv')

    # Whitespace tokenization of the free-text description column.
    tokenized = predict_df['userDescription'].apply(lambda text: text.split())

    # Pre-trained embedding model.
    word2vec_model = Word2Vec.load("word2vec.model")

    def get_feature_vector(tokens, model):
        """Mean of the Word2Vec vectors of the in-vocabulary tokens.

        Returns a zero vector when no token is in the vocabulary — the
        original np.mean over an empty list produced NaN, which later made
        np.stack fail on mixed shapes.
        """
        vectors = [model.wv[word] for word in tokens if word in model.wv]
        if not vectors:
            return np.zeros(model.vector_size, dtype=np.float32)
        return np.mean(vectors, axis=0)

    predict_vectors = tokenized.apply(lambda toks: get_feature_vector(toks, word2vec_model))

    # Rebuild the network with the training-time dimensions and load weights.
    input_dim = 100  # Word2Vec embedding size
    hidden_dim = 64
    # NOTE(review): class count is derived from the input file's existing
    # 'category' column — assumes it matches the trained model's output size.
    output_dim = len(predict_df['category'].unique())

    model = CategoryModel(input_dim, hidden_dim, output_dim)
    model.load_state_dict(torch.load('categoryModel.pth'))
    model.eval()

    # Batch inference; argmax over logits gives the predicted class index.
    predict_X = torch.tensor(np.stack(predict_vectors.values), dtype=torch.float32)
    with torch.no_grad():
        predict_outputs = model(predict_X)
        _, predicted_categories = torch.max(predict_outputs, 1)

    # Persist predictions (overwrites the 'category' column).
    predict_df['category'] = predicted_categories.numpy()
    predict_df.to_csv('first_stage_user_category.csv', index=False)

    # NOTE(review): hard-coded Windows desktop path — will raise on any other
    # host; confirm deployment target.
    desktop_path = "C:\\Users\\123\\Desktop\\first_stage_user_category.csv"
    predict_df.to_csv(desktop_path, index=False)

    # Timestamp for the notification message.
    create_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Weighted recall against the ground-truth column.
    # NOTE(review): assumes 'true_category' uses the same label encoding as
    # the model's class indices — confirm upstream.
    recall = recall_score(predict_df['true_category'], predict_df['category'], average='weighted')

    # json.dumps guarantees well-formed JSON (the original hand-built f-string
    # would break if a value ever contained quotes); output is byte-identical
    # to the original format for the current values.
    producer.produce(
        USER_CATEGORY_REF_DONE,
        value=json.dumps({"recall": f"{recall:.4f}", "createTime": create_time})
    )
    producer.flush()

    return jsonify({'message': 'Prediction complete and Kafka message sent!'})

def _serve() -> None:
    """Launch the Flask development server on all interfaces, port 5001."""
    app.run(host='0.0.0.0', port=5001)


if __name__ == '__main__':
    _serve()
