import json
from datetime import datetime

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from gensim.models import Word2Vec
from flask import Flask, request, jsonify
from confluent_kafka import Producer


app = Flask(__name__)

# Select the compute device: CUDA GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Kafka configuration
KAFKA_BROKER = 'localhost:9092'  # replace with your Kafka broker address
REF_BY_CT_MODEL_DONE = 'ref_by_rt_model'

# Initialize the Kafka producer
producer = Producer({'bootstrap.servers': KAFKA_BROKER})

def send_kafka_message(topic, message):
    """Publish *message* to the Kafka *topic* and block until delivery completes."""
    producer.produce(topic, value=message)
    producer.flush()


def preprocess_data(view_history_predict, validate=False):
    """Build model-ready history tensors for the users in *view_history_predict*.

    Reads the product catalogue, trains a Word2Vec model over the
    comma-separated product descriptions, averages word vectors into one
    feature vector per product, and converts each user's viewed/disliked
    product-id histories into fixed-shape tensors.

    Args:
        view_history_predict: list of dicts with keys 'user_id', 'history'
            and 'dislike' (the latter two are comma-separated product-id
            strings).
        validate: when True, return the prepared tensors; when False the
            function only builds the Word2Vec features and returns None
            (kept for backward compatibility).

    Returns:
        (pos_history_tensor, neg_history_tensor, user_ids, candidates) when
        *validate* is True, otherwise None.
    """
    # NOTE(review): the CSV is re-read and Word2Vec re-trained on every call;
    # consider caching these at module level if this runs per request.
    train_product = pd.read_csv('../CT_REC/train_product.csv')

    # Turn each product description into the mean of its word vectors.
    descriptions = train_product['description'].apply(lambda x: x.split(','))
    w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

    product_feat = {}
    for _, row in train_product.iterrows():
        words = row['description'].split(',')
        vectors = [w2v_model.wv[w] for w in words if w in w2v_model.wv]
        product_feat[row['product_id']] = np.mean(vectors, axis=0)

    def create_history_and_dislike_tensor(view_history, product_feat, max_history_length=10):
        """Convert per-user id histories into (batch, T, dim) float tensors."""
        user_ids = []
        candidates = []
        pos_rows = []
        neg_rows = []
        default_feat = np.zeros_like(next(iter(product_feat.values())))

        for row in view_history:  # view_history is a list of dicts
            user_ids.append(row['user_id'])
            pos_product_ids = row['history'].split(',')
            neg_product_ids = row['dislike'].split(',')

            pos_rows.append([product_feat.get(pid, default_feat)
                             for pid in pos_product_ids[:max_history_length]])
            neg_rows.append([product_feat.get(pid, default_feat)
                             for pid in neg_product_ids[:max_history_length]])
            candidates.append(pos_product_ids)

        def pad_and_stack(rows):
            # Pad shorter histories with the zero vector up to the longest
            # (truncated) history in the batch -- np.stack would otherwise
            # fail on ragged per-user history lengths. When all histories
            # have equal length this is identical to a plain stack.
            width = max(len(r) for r in rows)
            padded = [r + [default_feat] * (width - len(r)) for r in rows]
            return torch.tensor(np.stack(padded, axis=0), dtype=torch.float32)

        return pad_and_stack(pos_rows), pad_and_stack(neg_rows), user_ids, candidates

    if validate:
        return create_history_and_dislike_tensor(view_history_predict, product_feat)


class RNNModel(nn.Module):
    """LSTM encoder: runs the input sequence through a multi-layer LSTM and
    maps the final time step through BatchNorm and a linear projection.

    Args:
        input_dim: feature size of each time step.
        hidden_dim: LSTM hidden state size.
        num_layers: number of stacked LSTM layers.
        output_dim: size of the projected output vector.
    """

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(RNNModel, self).__init__()
        self.rnn = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.bn = nn.BatchNorm1d(hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Allocate the initial hidden/cell states on the input's own device.
        # (Previously these used the module-level global `device`, which
        # crashes when the input lives on a different device.)
        h0 = torch.zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size, device=x.device)
        c0 = torch.zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size, device=x.device)

        out, _ = self.rnn(x, (h0, c0))
        out = out[:, -1, :]  # keep only the last time step: (batch, hidden_dim)
        out = self.bn(out)
        out = self.fc(out)
        return out


class TransformerModel(nn.Module):
    """Self-attends the input with a full nn.Transformer (the input doubles as
    both source and target) and projects the result to *output_dim*."""

    def __init__(self, input_dim, output_dim, nhead, num_layers):
        super(TransformerModel, self).__init__()
        # NOTE(review): positionally this is nn.Transformer(d_model, nhead,
        # num_encoder_layers), so `num_layers` only sets the encoder depth and
        # the decoder keeps its default depth. Left untouched so the saved
        # checkpoint still loads -- confirm this matches training intent.
        self.transformer = nn.Transformer(input_dim, nhead, num_layers)
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.transformer(x, x)
        # Collapse any sequence dimension so the linear layer sees 2-D input.
        out = out.mean(dim=1) if out.dim() > 2 else out
        return self.fc(out)


class RecModel(nn.Module):
    """Sequential composition: the RNN encoder feeds the transformer head."""

    def __init__(self, rnn_model, transformer_model):
        super(RecModel, self).__init__()
        self.rnn = rnn_model
        self.transformer = transformer_model

    def forward(self, x):
        # Encode with the RNN, then score with the transformer.
        return self.transformer(self.rnn(x))


@app.route('/predict', methods=['POST'])
def predict():
    """Score each user's positive view history and publish results to Kafka.

    Expects a JSON body that is a list of dicts with 'user_id', 'history' and
    'dislike' keys. Returns a JSON list of per-user similarity records and
    sends one Kafka message per user to REF_BY_CT_MODEL_DONE.
    """
    view_history_predict = request.json  # expected: JSON list of dicts

    pos_history_validate_tensor, neg_history_validate_tensor, user_ids, candidates = preprocess_data(
        view_history_predict, validate=True)

    # NOTE(review): models are rebuilt and the checkpoint re-read on every
    # request; consider loading once at startup for throughput.
    rnn_model = RNNModel(input_dim=100, hidden_dim=64, output_dim=100, num_layers=2).to(device)
    transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
    rec_model = RecModel(rnn_model, transformer_model).to(device)

    # map_location makes a CUDA-trained checkpoint loadable on CPU-only hosts
    # (torch.load would otherwise raise when deserializing CUDA tensors).
    rec_model.load_state_dict(torch.load('../result/RecModel.pth', map_location=device))
    rec_model.eval()

    batch_date = datetime.now().strftime("%Y%m%d")
    with torch.no_grad():
        pos_batchUserHisW2CFeat = pos_history_validate_tensor.to(device)
        # rec_model composes rnn_model + transformer_model, identical to
        # running them separately.
        pos_batchUserHisFinalFeat = rec_model(pos_batchUserHisW2CFeat)
        # flatten() keeps a 1-D vector even for a single-user batch, where
        # squeeze() would collapse to a 0-d scalar and break the DataFrame.
        pos_sim_values = torch.tanh(pos_batchUserHisFinalFeat).flatten()

        # Assemble one record per user. tolist() yields plain Python floats,
        # which downstream json serialization can handle.
        pos_eval_results = pd.DataFrame({
            'user_id': user_ids,
            'candidates': candidates,
            'sim_value': pos_sim_values.cpu().tolist(),
            "batchDate": batch_date
        })

        results_list = pos_eval_results.to_dict(orient='records')
        print(f"正样本: \n{results_list}")

    for _, row in pos_eval_results.iterrows():
        message = {
            "user_id": row['user_id'],
            "candidates": row['candidates'],
            # explicit float(): json.dumps cannot serialize numpy float32
            # scalars, which is what the previous .numpy() path produced.
            "simValue": float(row['sim_value']),
            "batchDate": batch_date
        }
        send_kafka_message(REF_BY_CT_MODEL_DONE, json.dumps(message))
    # Return the scored records as the HTTP response.
    return jsonify(results_list)


if __name__ == '__main__':
    # Start the Flask development server (not suitable for production).
    app.run(port=5001)
