import os
from datetime import datetime

from confluent_kafka import Producer
from flask import Flask, jsonify
from gensim.models import Word2Vec
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

app = Flask(__name__)

# Kafka configuration: broker address and the topic used to announce that
# model training has completed.
KAFKA_BROKER = 'localhost:9092'
TRAIN_CT_MODEL_DONE = 'train_ct_model_done'

# Create the Kafka producer instance (module-level, shared by all requests;
# connecting at import time is a side effect of loading this module).
producer = Producer({'bootstrap.servers': KAFKA_BROKER})

# Select the compute device: CUDA GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def train_model():
    """Train the CNN+Transformer recommendation model end to end.

    Reads ``train_product.csv`` and ``view_history_train.csv`` from the
    working directory, embeds product descriptions with Word2Vec, builds a
    fixed-length per-user view-history tensor, trains for 100 full-batch
    epochs, writes the loss history to CSV, saves the model weights, and
    publishes a Kafka completion message.

    Returns:
        A human-readable status string for the HTTP caller.

    Raises:
        FileNotFoundError: if either input CSV is missing.
    """
    # 1. Load raw training data.
    train_product = pd.read_csv('train_product.csv')
    view_history_train = pd.read_csv('view_history_train.csv')

    # 2. Embed product descriptions with Word2Vec (whitespace tokenization).
    descriptions = train_product['description'].apply(lambda x: x.split())
    w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

    # Average the word vectors of each description into one product feature.
    # BUG FIX: the original fed np.mean an empty list (crash) when a
    # description had no in-vocabulary tokens; fall back to a zero vector.
    # (The original also built an unused `product_feat_tensor`; removed.)
    zero_vec = np.zeros(w2v_model.vector_size, dtype=np.float32)
    product_feat = {}
    for _, row in train_product.iterrows():
        words = row['description'].split()
        vectors = [w2v_model.wv[word] for word in words if word in w2v_model.wv]
        product_feat[row['product_id']] = np.mean(vectors, axis=0) if vectors else zero_vec

    # 3. Build a fixed-length per-user history tensor.
    def create_history_tensor(view_history, product_feat, max_history_length=10):
        """Return (user_ids, float32 tensor [users, max_history_length, feat_dim])."""
        user_ids = []
        histories = []
        default_feat = np.zeros_like(next(iter(product_feat.values())))

        for _, row in view_history.iterrows():
            product_ids = row['history'].split()
            # Truncate to the first max_history_length items and pad short
            # histories with zero vectors; unknown ids also map to zeros.
            # NOTE(review): `pid` here is a str from split(), while
            # product_feat keys come from the CSV's product_id column —
            # confirm the id types actually match, otherwise every lookup
            # silently falls back to the default vector.
            feats = [product_feat.get(pid, default_feat) for pid in product_ids[:max_history_length]]
            if len(feats) < max_history_length:
                feats.extend([default_feat] * (max_history_length - len(feats)))

            user_ids.append(row['user_id'])
            histories.append(feats)

        stacked = np.stack(histories, axis=0)
        return user_ids, torch.tensor(stacked, dtype=torch.float32)

    # BUG FIX: the original called create_history_tensor twice (first result
    # discarded as an un-unpacked tuple); build the tensor exactly once.
    _, view_history_train_tensor = create_history_tensor(view_history_train, product_feat)

    # 4. Model definitions.
    class CNNModel(nn.Module):
        """Two 3x3 conv layers + one linear head.

        The fc layer assumes the spatial dims stay (input_dim, output_dim),
        i.e. the input is (batch, 1, input_dim, output_dim); padding=1 with
        kernel 3 preserves those dims.
        """

        def __init__(self, input_dim, output_dim):
            super(CNNModel, self).__init__()
            self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
            self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
            self.fc = nn.Linear(32 * input_dim * output_dim, output_dim)

        def forward(self, x):
            x = torch.relu(self.conv1(x))
            x = torch.relu(self.conv2(x))
            x = x.view(x.size(0), -1)  # flatten all channels/positions
            return self.fc(x)

    class TransformerModel(nn.Module):
        """nn.Transformer used self-attentively (src == tgt) + linear head.

        NOTE(review): `num_layers` is passed positionally as
        num_encoder_layers; num_decoder_layers stays at nn.Transformer's
        default — confirm this asymmetry is intended.
        """

        def __init__(self, input_dim, output_dim, nhead, num_layers):
            super(TransformerModel, self).__init__()
            self.transformer = nn.Transformer(input_dim, nhead, num_layers)
            self.fc = nn.Linear(input_dim, output_dim)

        def forward(self, x):
            x = self.transformer(x, x)
            if len(x.shape) > 2:
                x = x.mean(dim=1)  # pool any sequence dimension away
            return self.fc(x)

    class RecModel(nn.Module):
        """Composition: CNN feature extractor followed by the Transformer head."""

        def __init__(self, cnn_model, transformer_model):
            super(RecModel, self).__init__()
            self.cnn = cnn_model
            self.transformer = transformer_model

        def forward(self, x):
            return self.transformer(self.cnn(x))

    # 5. Full-batch training: MSE of sigmoid(score) against an all-ones target.
    cnn_model = CNNModel(input_dim=10, output_dim=100).to(device)
    transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
    rec_model = RecModel(cnn_model, transformer_model).to(device)

    optimizer = optim.Adam(rec_model.parameters(), lr=0.001)
    criterion = nn.MSELoss()

    # Per-epoch loss records, tagged with the training date.
    training_records = []
    batch_date = datetime.now().strftime('%Y-%m-%d')

    # Add the channel dim for Conv2d once, outside the loop (loop-invariant):
    # shape becomes [users, 1, max_history_length, feat_dim].
    batch_input = view_history_train_tensor.unsqueeze(1).to(device)

    for epoch in range(100):
        rec_model.train()
        optimizer.zero_grad()

        # FIX: use the composed rec_model (the original built it but then
        # invoked the two sub-models directly; the computation is identical).
        sim_values = torch.sigmoid(rec_model(batch_input)).squeeze()
        target_values = torch.ones_like(sim_values)

        loss = criterion(sim_values, target_values)
        loss.backward()
        optimizer.step()

        training_records.append({'Epoch': epoch + 1, 'Loss': loss.item(), 'batch_date': batch_date})
        print(f'Epoch {epoch + 1}/100, Loss: {loss.item()}')

    # Persist the training record next to the working directory.
    training_df = pd.DataFrame(training_records)
    training_df.to_csv('final_train_record.csv', index=False)

    # Best-effort secondary copy to a developer desktop. FIX: this hard-coded
    # Windows path crashes on any other machine, so it must not abort an
    # otherwise-successful run. TODO: make configurable or remove.
    desktop_path = "C:\\Users\\123\\Desktop\\final_train_record.csv"
    try:
        training_df.to_csv(desktop_path, index=False)
    except OSError as exc:
        print(f'Warning: could not write secondary copy to {desktop_path}: {exc}')

    # 6. Save the trained weights (FIX: create the directory if missing,
    # otherwise torch.save raises).
    os.makedirs('../result', exist_ok=True)
    torch.save(rec_model.state_dict(), '../result/RecModel.pth')

    # Notify downstream consumers that training finished.
    producer.produce(TRAIN_CT_MODEL_DONE, value='success')
    producer.flush()

    return "Model training completed, saved, and Kafka message sent."


@app.route('/train', methods=['GET'])
def trigger_training():
    """HTTP endpoint that runs one full (blocking) training pass.

    Returns JSON ``{"message": ...}`` on success. FIX: a failure inside
    train_model (missing CSV, Kafka down, etc.) previously surfaced as
    Flask's default HTML 500 page; return a parseable JSON error with
    status 500 instead.
    """
    try:
        result = train_model()
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
    return jsonify({"message": result})


if __name__ == '__main__':
    # Run the Flask development server on port 5000 (blocking call).
    app.run(port=5000)
