import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from gensim.models import Word2Vec
from sklearn.metrics import accuracy_score, recall_score

# Select the compute device: prefer a CUDA GPU when available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 2. Data preprocessing
# 2.1 Load the raw CSV data (paths are relative to this script's location).
train_product = pd.read_csv('../CT_REC/train_product.csv')
view_history_train = pd.read_csv('../CT_REC/view_history_train.csv')
view_history_test = pd.read_csv('../CT_REC/view_history_test.csv')

# 2.2 Turn each product description into a feature vector via Word2Vec.
# Train the Word2Vec model on comma-split description tokens.
# NOTE(review): assumes 'description' is a comma-separated token string —
# confirm against the actual data.
descriptions = train_product['description'].apply(lambda x: x.split(','))
w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

# Build a product_id -> feature-vector mapping: each product's feature is the
# mean of the Word2Vec vectors of its in-vocabulary description tokens.
product_feat = {}
for _, row in train_product.iterrows():
    product_id = row['product_id']
    description = row['description'].split(',')
    product_feat[product_id] = np.mean([w2v_model.wv[word] for word in description if word in w2v_model.wv], axis=0)

# Stack all product features into a (num_products, 100) float32 tensor,
# ordered the same as the train_product rows.
product_feat_tensor = torch.tensor([product_feat[pid] for pid in train_product['product_id']], dtype=torch.float32)

# 2.3 Build per-user history tensors from a view-history table.
def create_history_and_dislike_tensor(view_history, product_feat, max_history_length=10):
    """Convert a view-history DataFrame into positive/negative feature tensors.

    Args:
        view_history: DataFrame with columns 'user_id', 'history' and 'dislike';
            the latter two are comma-separated product-id strings.
        product_feat: dict mapping product_id -> 1-D feature vector (ndarray);
            all vectors must share one shape.
            NOTE(review): ids from ``split(',')`` are strings — if the dict
            keys are ints (e.g. read from CSV), every lookup silently falls
            back to the zero vector. Confirm key types against the caller.
        max_history_length: each user's history is truncated *and padded* to
            exactly this many products.

    Returns:
        (user_ids, pos_tensor, neg_tensor) where both tensors have shape
        (num_users, max_history_length, feat_dim) and dtype float32.
        Unknown product ids and padding slots use a zero vector.
    """
    default_feat = np.zeros_like(next(iter(product_feat.values())))

    def _to_features(id_string):
        # Truncate to max_history_length, look up each id (zero vector for
        # unknown ids), then pad short histories with the zero vector so
        # every user's row stacks cleanly. (Without padding, ragged
        # histories made np.stack raise.)
        ids = id_string.split(',')[:max_history_length]
        feats = [product_feat.get(pid, default_feat) for pid in ids]
        feats.extend([default_feat] * (max_history_length - len(feats)))
        return feats

    user_ids = []
    pos_rows = []
    neg_rows = []
    for _, row in view_history.iterrows():
        user_ids.append(row['user_id'])
        pos_rows.append(_to_features(row['history']))   # positive samples
        neg_rows.append(_to_features(row['dislike']))   # negative samples

    # Stack per-user lists into (num_users, max_history_length, feat_dim).
    pos_tensor = torch.tensor(np.stack(pos_rows, axis=0), dtype=torch.float32)
    neg_tensor = torch.tensor(np.stack(neg_rows, axis=0), dtype=torch.float32)

    return user_ids, pos_tensor, neg_tensor

# Build positive/negative history tensors for the train and test splits
# (the returned user-id lists are discarded here).
_, pos_history_train_tensor, neg_history_train_tensor = create_history_and_dislike_tensor(view_history_train, product_feat)
_, pos_history_test_tensor, neg_history_test_tensor = create_history_and_dislike_tensor(view_history_test, product_feat)

# 3. Model definitions
# 3.1 RNN model definition (earlier drafts kept below for reference)
# class RNNModel(nn.Module):
#     def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
#         super(RNNModel, self).__init__()
#         self.hidden_dim = hidden_dim  # 存储 hidden_dim
#         self.num_layers = num_layers  # 存储 num_layers
#         self.rnn = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
#         self.fc = nn.Linear(hidden_dim, output_dim)
#
#     def forward(self, x):
#         h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(device)
#         c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(device)
#         out, _ = self.rnn(x, (h0, c0))
#         out = self.fc(out[:, -1, :])
#         return out
# RNN encoder with batch normalization on the final hidden output.
class RNNModel(nn.Module):
    """LSTM encoder: (batch, seq_len, input_dim) -> (batch, output_dim).

    Takes the last time step's LSTM output, batch-normalizes it, then
    projects it to output_dim with a linear layer.
    """

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(RNNModel, self).__init__()
        self.rnn = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.bn = nn.BatchNorm1d(hidden_dim)  # batch-normalization layer
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # BUG FIX: create the zero initial hidden/cell states on the
        # *input's* device instead of the module-level `device` global,
        # so the model works wherever it (and its input) actually live.
        h0 = torch.zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size, device=x.device)
        c0 = torch.zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size, device=x.device)

        out, _ = self.rnn(x, (h0, c0))
        out = out[:, -1, :]  # keep only the last time step's output
        out = self.bn(out)   # apply batch normalization
        out = self.fc(out)
        return out
# 添加噪声或其他正则化方法
# class RNNModel(nn.Module):
#     def __init__(self, input_dim, hidden_dim, num_layers, output_dim, dropout_prob=0.5):
#         super(RNNModel, self).__init__()
#         self.rnn = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
#         self.fc = nn.Linear(hidden_dim, output_dim)
#         self.dropout = nn.Dropout(dropout_prob)  # 添加 Dropout 层
#
#     def forward(self, x):
#         h0 = torch.zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size).to(device)
#         c0 = torch.zeros(self.rnn.num_layers, x.size(0), self.rnn.hidden_size).to(device)
#
#         out, _ = self.rnn(x, (h0, c0))
#         out = out[:, -1, :]  # 取最后一个时间步的输出
#         out = self.dropout(out)  # 应用 Dropout
#         out = self.fc(out)
#         return out


# 3.2 Transformer model definition
class TransformerModel(nn.Module):
    """Transformer head: refines feature vectors and projects to output_dim."""

    def __init__(self, input_dim, output_dim, nhead, num_layers):
        super(TransformerModel, self).__init__()
        # BUG FIX: the original positional call nn.Transformer(input_dim,
        # nhead, num_layers) set only num_encoder_layers, silently leaving
        # the decoder at its default of 6 layers. Apply num_layers to both.
        self.transformer = nn.Transformer(
            d_model=input_dim,
            nhead=nhead,
            num_encoder_layers=num_layers,
            num_decoder_layers=num_layers,
        )
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # Self-attention with x as both source and target.
        # NOTE(review): nn.Transformer expects (seq, batch, d_model); a 2-D
        # input is treated as an unbatched (seq, d_model) sequence — confirm
        # this matches the caller's intent.
        x = self.transformer(x, x)
        if len(x.shape) > 2:
            x = x.mean(dim=1)  # pool over the middle dimension
        x = self.fc(x)
        return x

# 3.3 Recommendation model definition
class RecModel(nn.Module):
    """Composite model: pipe the input through an RNN, then a Transformer."""

    def __init__(self, rnn_model, transformer_model):
        super(RecModel, self).__init__()
        self.rnn = rnn_model
        self.transformer = transformer_model

    def forward(self, x):
        # Apply both sub-models in sequence.
        return self.transformer(self.rnn(x))

# 4. Model training
rnn_model = RNNModel(input_dim=100, hidden_dim=64, output_dim=100, num_layers=2).to(device)
transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
rec_model = RecModel(rnn_model, transformer_model).to(device)

optimizer = optim.Adam(rec_model.parameters(), lr=0.001)
criterion = nn.MSELoss()

# BUG FIX: the progress message previously hard-coded "/100" while the loop
# ran 200 epochs. Keep one source of truth for the epoch count.
num_epochs = 200

for epoch in range(num_epochs):
    rec_model.train()
    optimizer.zero_grad()

    # Positive samples: push their scores toward 1.
    pos_batchUserHisW2CFeat = pos_history_train_tensor.to(device)
    pos_batchUserHisRnnFeat = rnn_model(pos_batchUserHisW2CFeat)
    pos_batchUserHisFinalFeat = transformer_model(pos_batchUserHisRnnFeat)
    # tanh replaces the earlier sigmoid activation
    pos_sim_values = torch.tanh(pos_batchUserHisFinalFeat).squeeze()
    pos_target_values = torch.ones_like(pos_sim_values)  # positive-sample targets

    # Negative samples: push their scores toward 0.
    neg_batchUserHisW2CFeat = neg_history_train_tensor.to(device)
    neg_batchUserHisRnnFeat = rnn_model(neg_batchUserHisW2CFeat)
    neg_batchUserHisFinalFeat = transformer_model(neg_batchUserHisRnnFeat)
    # tanh replaces the earlier sigmoid activation
    neg_sim_values = torch.tanh(neg_batchUserHisFinalFeat).squeeze()
    neg_target_values = torch.zeros_like(neg_sim_values)  # negative-sample targets

    # Total loss = positive MSE + negative MSE (full-batch update).
    pos_loss = criterion(pos_sim_values, pos_target_values)
    neg_loss = criterion(neg_sim_values, neg_target_values)
    loss = pos_loss + neg_loss

    loss.backward()
    optimizer.step()

    print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item()}')

# 5. Evaluation on positive test samples.
rec_model.eval()

with torch.no_grad():
    pos_batchUserHisW2CFeat_test = pos_history_test_tensor.to(device)
    pos_batchUserHisRnnFeat_test = rnn_model(pos_batchUserHisW2CFeat_test)
    pos_batchUserHisFinalFeat_test = transformer_model(pos_batchUserHisRnnFeat_test)
    # tanh replaces the earlier sigmoid activation
    pos_sim_values_test = torch.tanh(pos_batchUserHisFinalFeat_test).squeeze()

    # BUG FIX: sklearn metrics cannot consume CUDA tensors — move the rounded
    # scores to CPU numpy first. All true labels are 1 for positive samples.
    # NOTE(review): tanh output rounds to -1/0/1, not just 0/1; consider
    # clamping (or restoring sigmoid) for a true binary prediction.
    pos_preds = pos_sim_values_test.round().cpu().numpy()
    pos_labels = np.ones_like(pos_preds)
    recall = recall_score(pos_labels, pos_preds)
    accuracy = accuracy_score(pos_labels, pos_preds)

    print(f'正样本 Recall: {recall:.4f}, 正样本 Accuracy: {accuracy:.4f}')

# 5b. Evaluation on negative test samples.
rec_model.eval()

with torch.no_grad():
    neg_batchUserHisW2CFeat_test = neg_history_test_tensor.to(device)
    neg_batchUserHisRnnFeat_test = rnn_model(neg_batchUserHisW2CFeat_test)
    neg_batchUserHisFinalFeat_test = transformer_model(neg_batchUserHisRnnFeat_test)
    # tanh replaces the earlier sigmoid activation
    neg_sim_values_test = torch.tanh(neg_batchUserHisFinalFeat_test).squeeze()

    # BUG FIX: sklearn metrics cannot consume CUDA tensors — move the rounded
    # scores to CPU numpy first. All true labels are 0 for negative samples.
    # NOTE(review): tanh output rounds to -1/0/1, not just 0/1; consider
    # clamping (or restoring sigmoid) for a true binary prediction.
    neg_preds = neg_sim_values_test.round().cpu().numpy()
    neg_labels = np.zeros_like(neg_preds)
    recall = recall_score(neg_labels, neg_preds)
    accuracy = accuracy_score(neg_labels, neg_preds)

    print(f'负样本 Recall: {recall:.4f}, 负样本 Accuracy: {accuracy:.4f}')

# 6. Save the trained model and export per-user similarity scores.
torch.save(rec_model.state_dict(), '../result/RecModel.pth')

# Positive-sample scores, one row per test user.
pos_eval_results = pd.DataFrame({
    'user_id': view_history_test['user_id'],
    'sim_value': pos_sim_values_test.cpu().numpy()
})
print(f"正样本: \n{pos_eval_results}")

# Negative-sample scores, one row per test user.
neg_eval_results = pd.DataFrame({
    'user_id': view_history_test['user_id'],
    'sim_value': neg_sim_values_test.cpu().numpy()
})
print(f"负样本: \n{neg_eval_results}")


pos_eval_results.to_csv(f'posRecModelEval.csv', index=False)
neg_eval_results.to_csv(f'negRecModelEval.csv', index=False)

# TODO: publish the positive-sample evaluation results row by row as Kafka messages.
