# rt_rec_train_test.py
import json
from datetime import datetime

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from gensim.models import Word2Vec
from sklearn.metrics import accuracy_score, recall_score
from flask import Flask, request, jsonify
from confluent_kafka import Producer

app = Flask(__name__)

# Kafka configuration
KAFKA_BROKER = 'localhost:9092'  # replace with your Kafka broker address
REF_BY_CT_MODEL_DONE = 'ref_by_ct_model_done'

# Initialize the Kafka producer used by send_kafka_message below
producer = Producer({'bootstrap.servers': KAFKA_BROKER})


def send_kafka_message(topic, message):
    """Publish *message* to the Kafka *topic* and block until it is delivered."""
    kafka_producer = producer
    kafka_producer.produce(topic, message)
    kafka_producer.flush()


# Select the compute device: CUDA when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def preprocess_data():
    """Load the CSV train/test data and build per-user history tensors.

    Returns a 6-tuple:
        (pos_history_train_tensor, neg_history_train_tensor, train_user_ids,
         pos_history_test_tensor, neg_history_test_tensor, test_user_ids)
    where each tensor is float32 of shape (num_users, history_len, 100)
    built from Word2Vec-averaged product features, and the id lists give
    the user order of the corresponding tensor rows.
    """
    # Read the raw data (paths are relative to the working directory).
    train_product = pd.read_csv('../CT_REC/train_product.csv')
    view_history_train = pd.read_csv('../CT_REC/view_history_train.csv')
    view_history_test = pd.read_csv('../CT_REC/view_history_test.csv')

    # Train Word2Vec on comma-separated description tokens; each product is
    # embedded as the mean of its token vectors (100-dim).
    descriptions = train_product['description'].apply(lambda x: x.split(','))
    w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

    product_feat = {}
    for _, row in train_product.iterrows():
        product_id = row['product_id']
        description = row['description'].split(',')
        # min_count=1 above puts every training token in the vocabulary, so
        # the filtered token list is non-empty for products seen in training.
        product_feat[product_id] = np.mean([w2v_model.wv[word] for word in description if word in w2v_model.wv], axis=0)

    def create_history_and_dislike_tensor(view_history, product_feat, max_history_length=10):
        # Build (users, history, feat) tensors for the liked ('history') and
        # disliked ('dislike') product-id lists; unknown ids map to zeros.
        # NOTE(review): pandas may read product_id as int while the split ids
        # here are str — confirm the types match, otherwise every .get() below
        # falls back to the zero vector.
        user_ids = []
        pos_history_tensor = []
        neg_history_tensor = []
        default_feat = np.zeros_like(next(iter(product_feat.values())))

        for _, row in view_history.iterrows():
            user_id = row['user_id']
            pos_product_ids = row['history'].split(',')
            neg_product_ids = row['dislike'].split(',')

            # Truncate to at most max_history_length items. NOTE(review): there
            # is no padding for shorter histories, so np.stack below assumes
            # every user has the same history length — confirm in the CSVs.
            pos_user_history = [product_feat.get(pid, default_feat) for pid in pos_product_ids[:max_history_length]]
            pos_history_tensor.append(pos_user_history)

            neg_user_history = [product_feat.get(pid, default_feat) for pid in neg_product_ids[:max_history_length]]
            neg_history_tensor.append(neg_user_history)

            user_ids.append(user_id)

        pos_histories_np = np.stack(pos_history_tensor, axis=0)
        pos_histories_tensor = torch.tensor(pos_histories_np, dtype=torch.float32)

        neg_histories_np = np.stack(neg_history_tensor, axis=0)
        neg_histories_tensor = torch.tensor(neg_histories_np, dtype=torch.float32)

        return pos_histories_tensor, neg_histories_tensor, user_ids  # three values: pos, neg, ids

    pos_history_train_tensor, neg_history_train_tensor, train_user_ids= create_history_and_dislike_tensor(
        view_history_train, product_feat)
    pos_history_test_tensor, neg_history_test_tensor, test_user_ids = create_history_and_dislike_tensor(
        view_history_test, product_feat)

    return pos_history_train_tensor, neg_history_train_tensor,train_user_ids, pos_history_test_tensor, neg_history_test_tensor, test_user_ids


class RNNModel(nn.Module):
    """LSTM encoder: maps a (batch, seq, input_dim) sequence to a
    (batch, output_dim) vector via the last time step's hidden output,
    batch-normalized and linearly projected."""

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(RNNModel, self).__init__()
        self.rnn = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.bn = nn.BatchNorm1d(hidden_dim)  # normalize the last-step features
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # BUG FIX: the original built h0/c0 by hand on the module-level global
        # `device`, which breaks when x lives on a different device (and ties
        # the class to module state). nn.LSTM already defaults (h0, c0) to
        # zeros on x's device/dtype, so simply omit them — identical math.
        out, _ = self.rnn(x)
        out = out[:, -1, :]  # take the final time step's output
        out = self.bn(out)  # apply batch normalization
        out = self.fc(out)
        return out


class TransformerModel(nn.Module):
    """Transformer head: self-attends the input (src == tgt), mean-pools a
    batched 3-D output over dim 1, and projects to output_dim scores."""

    def __init__(self, input_dim, output_dim, nhead, num_layers):
        # BUG FIX: the original called nn.Transformer(input_dim, nhead,
        # num_layers) positionally, so `num_layers` only set
        # num_encoder_layers while the decoder silently kept its default of
        # 6 layers. Apply num_layers to both stacks, as the parameter name
        # intends.
        super(TransformerModel, self).__init__()
        self.transformer = nn.Transformer(
            d_model=input_dim,
            nhead=nhead,
            num_encoder_layers=num_layers,
            num_decoder_layers=num_layers,
        )
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # Self-attention: the same tensor serves as source and target.
        x = self.transformer(x, x)
        if len(x.shape) > 2:
            # NOTE(review): with the default batch_first=False, dim 1 is the
            # batch axis, not the sequence axis — confirm callers pass 2-D
            # inputs (where this branch is skipped) as RecModel does.
            x = x.mean(dim=1)
        x = self.fc(x)
        return x


class RecModel(nn.Module):
    """Composite recommender: an RNN encoder followed by a Transformer head."""

    def __init__(self, rnn_model, transformer_model):
        super(RecModel, self).__init__()
        self.rnn = rnn_model
        self.transformer = transformer_model

    def forward(self, x):
        # Pipe the input through both stages in order.
        return self.transformer(self.rnn(x))


@app.route('/train', methods=['GET'])
def train_and_test():
    """Train RecModel on positive/negative view histories, stream per-epoch
    loss and positive-sample eval metrics to Kafka, save the weights, and
    return a JSON status. Full-batch training, 200 epochs, GET /train.
    """
    # NOTE(review): pos_train_results is overwritten every epoch, so the Kafka
    # loop below only ever sends the final epoch's row — confirm whether
    # per-epoch messages were intended (append vs. overwrite).
    global pos_train_results
    batch_date = datetime.now().strftime('%Y-%m-%d')
    pos_history_train_tensor, neg_history_train_tensor,train_user_ids, pos_history_test_tensor, neg_history_test_tensor, test_user_ids = preprocess_data()

    rnn_model = RNNModel(input_dim=100, hidden_dim=64, output_dim=100, num_layers=2).to(device)
    transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
    rec_model = RecModel(rnn_model, transformer_model).to(device)

    optimizer = optim.Adam(rec_model.parameters(), lr=0.01)
    criterion = nn.MSELoss()

    for epoch in range(200):
        rec_model.train()
        optimizer.zero_grad()

        # Positive samples: push tanh-squashed scores toward 1.
        pos_batchUserHisW2CFeat = pos_history_train_tensor.to(device)
        pos_batchUserHisRnnFeat = rnn_model(pos_batchUserHisW2CFeat)
        pos_batchUserHisFinalFeat = transformer_model(pos_batchUserHisRnnFeat)
        pos_sim_values = torch.tanh(pos_batchUserHisFinalFeat).squeeze()
        pos_target_values = torch.ones_like(pos_sim_values)  # positive-sample targets

        # Negative samples: push tanh-squashed scores toward 0.
        neg_batchUserHisW2CFeat = neg_history_train_tensor.to(device)
        neg_batchUserHisRnnFeat = rnn_model(neg_batchUserHisW2CFeat)
        neg_batchUserHisFinalFeat = transformer_model(neg_batchUserHisRnnFeat)
        neg_sim_values = torch.tanh(neg_batchUserHisFinalFeat).squeeze()
        neg_target_values = torch.zeros_like(neg_sim_values)  # negative-sample targets

        # Combined MSE loss over both sample sets.
        pos_loss = criterion(pos_sim_values, pos_target_values)
        neg_loss = criterion(neg_sim_values, neg_target_values)
        loss = pos_loss + neg_loss

        loss.backward()
        optimizer.step()

        print(f'Epoch {epoch + 1}/200, Loss: {loss.item()}')
        pos_train_results = pd.DataFrame({
            'Epoch': [epoch + 1],
            'Loss': [loss.item()]
        })
    for _, row in pos_train_results.iterrows():
        message = {
            "Epoch": row['Epoch'],
            "Loss": row['Loss'],
            "batchDate": batch_date
        }
        send_kafka_message("rt_rec_pos_train", json.dumps(message))
    # Persist the trained weights.
    torch.save(rec_model.state_dict(), '../result/RecModel.pth')

    # Positive-sample evaluation.
    rec_model.eval()
    with torch.no_grad():
        pos_batchUserHisW2CFeat_test = pos_history_test_tensor.to(device)
        pos_batchUserHisRnnFeat_test = rnn_model(pos_batchUserHisW2CFeat_test)
        pos_batchUserHisFinalFeat_test = transformer_model(pos_batchUserHisRnnFeat_test)
        pos_sim_values_test = torch.tanh(pos_batchUserHisFinalFeat_test).squeeze()

        # NOTE(review): tanh output lies in [-1, 1], so .round() can yield
        # -1/0/1 while y_true is all ones — sklearn's binary recall/accuracy
        # can error or degenerate on such labels, and CUDA tensors would need
        # .cpu() first; confirm the intended metric.
        recall = recall_score(torch.ones_like(pos_sim_values_test), pos_sim_values_test.round())
        accuracy = accuracy_score(torch.ones_like(pos_sim_values_test), pos_sim_values_test.round())

        print(f'正样本 Recall: {recall:.4f}, 正样本 Accuracy: {accuracy:.4f}')

        pos_eval_results = pd.DataFrame({
            'Recall': [recall],
            'Accuracy': [accuracy]
        })
    for _, row in pos_eval_results.iterrows():
        message = {
            "Recall": row['Recall'],
            "Accuracy": row['Accuracy'],
            "batchDate": batch_date
        }
        send_kafka_message("rt_rec_pos_eval", json.dumps(message))

    # Negative-sample evaluation (currently disabled).
    # with torch.no_grad():
    #     neg_batchUserHisW2CFeat_test = neg_history_test_tensor.to(device)
    #     neg_batchUserHisRnnFeat_test = rnn_model(neg_batchUserHisW2CFeat_test)
    #     neg_batchUserHisFinalFeat_test = transformer_model(neg_batchUserHisRnnFeat_test)
    #     neg_sim_values_test = torch.tanh(neg_batchUserHisFinalFeat_test).squeeze()
    #
    #     recall = recall_score(torch.zeros_like(neg_sim_values_test), neg_sim_values_test.round())
    #     accuracy = accuracy_score(torch.zeros_like(neg_sim_values_test), neg_sim_values_test.round())
    #
    #     print(f'负样本 Recall: {recall:.4f}, 负样本 Accuracy: {accuracy:.4f}')
    #
    #     neg_eval_results = pd.DataFrame({
    #         'user_id': view_history_test['user_id'],
    #         'sim_value': neg_sim_values_test.cpu().numpy()
    #     })
    #     neg_eval_results.to_csv(f'negRecModelEval.csv', index=False)
    #
    return jsonify({"status": "training and testing completed"})


if __name__ == '__main__':
    # Flask development server; exposes GET /train on port 5000.
    app.run(port=5000)
