import json
from datetime import datetime
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from gensim.models import Word2Vec
from sklearn.metrics import accuracy_score, recall_score
from flask import Flask, jsonify
from confluent_kafka import Producer

# Select compute device: CUDA GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Initialize the Flask application.
app = Flask(__name__)

# Kafka configuration.
KAFKA_BROKER = 'localhost:9092'  # Replace with your Kafka broker address.
REF_BY_CT_MODEL_DONE = 'ref_by_ct_model_done'  # Topic used to signal that a model run finished.

# Initialize the Kafka producer (module-level, reused for every message).
producer = Producer({'bootstrap.servers': KAFKA_BROKER})

def send_kafka_message(topic, message):
    """Publish *message* to the Kafka *topic* and block until it is delivered.

    Flushing after every produce trades throughput for simplicity: each
    call returns only once the message has actually left the producer.
    """
    producer.produce(topic, message)
    producer.flush()

# 定义模型结构
class CNNModel(nn.Module):
    """Two-layer convolutional feature extractor.

    Expects input of shape (batch, 1, input_dim, output_dim); the 3x3
    convolutions with padding=1 preserve the spatial dimensions, so the
    flattened feature size fed to the linear layer is
    32 * input_dim * output_dim.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.fc = nn.Linear(32 * input_dim * output_dim, output_dim)

    def forward(self, x):
        """Apply both conv layers with ReLU, flatten, project to output_dim."""
        out = torch.relu(self.conv1(x))
        out = torch.relu(self.conv2(out))
        flattened = out.view(out.size(0), -1)
        return self.fc(flattened)


class TransformerModel(nn.Module):
    """Transformer head (src == tgt) followed by a linear projection.

    NOTE(review): nn.Transformer's third positional argument is
    num_encoder_layers, so `num_layers` here sets only the encoder depth
    while the decoder keeps its default — kept as-is to stay compatible
    with the trained checkpoint; confirm against the training script.
    """

    def __init__(self, input_dim, output_dim, nhead, num_layers):
        super().__init__()
        self.transformer = nn.Transformer(input_dim, nhead, num_layers)
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Self-attend x against itself, pool any sequence dim, then project."""
        attended = self.transformer(x, x)
        # Collapse the extra dimension (if present) by mean pooling.
        if attended.dim() > 2:
            attended = attended.mean(dim=1)
        return self.fc(attended)


class RecModel(nn.Module):
    """Recommendation model: a CNN feature extractor feeding a transformer head."""

    def __init__(self, cnn_model, transformer_model):
        super().__init__()
        self.cnn = cnn_model
        self.transformer = transformer_model

    def forward(self, x):
        """Pipe x through the CNN, then through the transformer."""
        return self.transformer(self.cnn(x))

def create_candidates_tensor(view_history, product_feat, max_history_length=10):
    """Build a fixed-length candidate-feature tensor per user.

    Args:
        view_history: DataFrame with a 'user_id' column and a
            whitespace-separated 'candidates' string of product ids per row.
        product_feat: mapping of product id -> 1-D feature vector; unknown
            ids fall back to a zero vector of the same shape.
        max_history_length: candidate lists are truncated or zero-padded
            to exactly this length.

    Returns:
        (user_ids, tensor) where tensor has shape
        (num_rows, max_history_length, feat_dim) and dtype float32.
    """
    # Zero vector shaped like an arbitrary feature entry, used for padding
    # and for product ids missing from the mapping.
    zero_vec = np.zeros_like(next(iter(product_feat.values())))

    user_ids, per_user_feats = [], []
    for _, record in view_history.iterrows():
        candidates = record['candidates'].split()[:max_history_length]
        feats = [product_feat.get(pid, zero_vec) for pid in candidates]
        # Right-pad short histories with zero vectors up to the fixed length.
        feats += [zero_vec] * (max_history_length - len(feats))
        user_ids.append(record['user_id'])
        per_user_feats.append(feats)

    stacked = np.stack(per_user_feats, axis=0)
    return user_ids, torch.tensor(stacked, dtype=torch.float32)

def _to_native(value):
    """Convert a numpy scalar to its native Python equivalent for json.dumps."""
    return value.item() if hasattr(value, 'item') else value


@app.route('/run-model', methods=['GET'])
def run_model():
    """Load data, score candidates with the trained RecModel, persist and notify.

    Steps: read the input CSVs, embed product descriptions with Word2Vec,
    build the per-user candidate tensor, score it with the saved model,
    write ct_ref_results.csv, and publish one Kafka message per scored row.

    Returns:
        JSON response confirming completion.
    """
    # 1. Load input data.
    view_candidate = pd.read_csv('view_candidate.csv')
    train_product = pd.read_csv('train_product.csv')

    # 2. Turn each product description into a feature vector via Word2Vec.
    descriptions = train_product['description'].apply(lambda x: x.split())
    w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

    # Average the word vectors of each description into one product vector.
    product_feat = {}
    for _, row in train_product.iterrows():
        words = row['description'].split()
        product_feat[row['product_id']] = np.mean(
            [w2v_model.wv[word] for word in words if word in w2v_model.wv], axis=0)

    # 3. Build the per-user candidate-history tensor.
    _, view_candidate_test_tensor = create_candidates_tensor(view_candidate, product_feat)

    # 4. Model structure must match the one used at training time.
    cnn_model = CNNModel(input_dim=10, output_dim=100).to(device)
    transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
    rec_model = RecModel(cnn_model, transformer_model).to(device)

    # 5. Load the saved weights. map_location lets a GPU-trained
    # checkpoint load on a CPU-only host.
    rec_model.load_state_dict(torch.load('../result/RecModel.pth', map_location=device))
    rec_model.eval()

    # 6. Score the candidates.
    with torch.no_grad():
        batch_feat = view_candidate_test_tensor.unsqueeze(1).to(device)
        cnn_feat = cnn_model(batch_feat)
        final_feat = transformer_model(cnn_feat)

        # squeeze(-1) rather than squeeze(): a single-row batch must stay 1-D.
        sim_values_test = torch.sigmoid(final_feat).squeeze(-1)

        # Move to CPU/numpy before sklearn — CUDA tensors cannot be
        # converted to numpy implicitly.
        sim_np = sim_values_test.cpu().numpy()
        preds = sim_np.round()
        # NOTE(review): labels are all ones, so recall == accuracy ==
        # fraction of candidates predicted positive; these are sanity
        # numbers, not a real evaluation — confirm intended metric.
        labels = np.ones_like(preds)
        recall = recall_score(labels, preds)
        accuracy = accuracy_score(labels, preds)

        print(f'Recall: {recall:.4f}, Accuracy: {accuracy:.4f}')
        batch_date = datetime.now().strftime("%Y%m%d")
        # Persist the scored candidates.
        rec_eval_results = pd.DataFrame({
            'user_id': view_candidate['user_id'],
            'candidates': view_candidate['candidates'],
            'sim_value': sim_np,
            'batch_date': batch_date
        })
        rec_eval_results.to_csv('ct_ref_results.csv', index=False)

    # Publish one completion message per scored row. Numpy scalars are not
    # JSON-serializable, so convert them to native Python types first.
    for _, row in rec_eval_results.iterrows():
        message = {
            "user_id": _to_native(row['user_id']),
            "candidates": _to_native(row['candidates']),
            "simValue": float(row['sim_value']),
            "batchDate": row['batch_date']
        }
        send_kafka_message(REF_BY_CT_MODEL_DONE, json.dumps(message))

    return jsonify({'message': 'Model evaluation completed and Kafka message sent.'})

# Start the Flask development server when executed as a script.
if __name__ == '__main__':
    app.run(host='localhost', port=5000)
