
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from gensim.models import Word2Vec
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score, recall_score
from datetime import datetime

# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 2. Data preprocessing
# 2.1 Load the raw CSV files.
train_product = pd.read_csv('train_product.csv')
view_history_train = pd.read_csv('view_history_train.csv')
view_history_test = pd.read_csv('view_history_test.csv')

# 2.2 Turn each product description into a feature vector with Word2Vec.
# Tokenisation is a plain whitespace split; assumes the `description`
# column has no NaN values -- TODO confirm against the data.
descriptions = train_product['description'].apply(lambda x: x.split())
w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

# Map product_id -> mean of the description's word vectors.
# min_count=1 above puts every training token in the vocabulary, so the
# `word in w2v_model.wv` filter is only a safety net.
product_feat = {}
for row in train_product.itertuples(index=False):
    tokens = row.description.split()
    vectors = [w2v_model.wv[word] for word in tokens if word in w2v_model.wv]
    if vectors:
        product_feat[row.product_id] = np.mean(vectors, axis=0)
    else:
        # Empty/whitespace-only description: np.mean over an empty list
        # would produce NaN -- use a zero vector instead.
        product_feat[row.product_id] = np.zeros(w2v_model.vector_size, dtype=np.float32)

# Stack into one (n_products, vector_size) float32 tensor, in the same
# row order as train_product.
product_feat_tensor = torch.tensor(
    np.stack([product_feat[pid] for pid in train_product['product_id']]),
    dtype=torch.float32,
)
def create_history_tensor(view_history, product_feat, max_history_length=10):
    """Build a fixed-length per-user history tensor from viewing records.

    Parameters
    ----------
    view_history : pandas.DataFrame
        Must contain a ``user_id`` column and a ``history`` column holding
        a whitespace-separated string of product ids.
    product_feat : dict
        Maps product id -> 1-D feature vector (numpy array).  Must be
        non-empty.  NOTE(review): ids parsed from ``history`` are strings;
        if the dict keys are ints (e.g. parsed from CSV) every lookup
        silently falls back to the zero vector -- confirm key types match.
    max_history_length : int, optional
        Each history is truncated / zero-padded to exactly this length.

    Returns
    -------
    (list, torch.Tensor)
        User ids in row order, and a float32 tensor of shape
        ``(n_users, max_history_length, feat_dim)``.
    """
    # Zero vector used both for unknown products and for padding.
    default_feat = np.zeros_like(next(iter(product_feat.values())))

    user_ids = []
    history_rows = []
    for rec in view_history.itertuples(index=False):
        product_ids = rec.history.split()

        # Keep at most max_history_length items (truncate the tail).
        user_history = [product_feat.get(pid, default_feat)
                        for pid in product_ids[:max_history_length]]

        # Zero-pad short histories up to the fixed length.
        if len(user_history) < max_history_length:
            user_history.extend([default_feat] * (max_history_length - len(user_history)))

        user_ids.append(rec.user_id)
        history_rows.append(user_history)

    if not history_rows:
        # Empty input: np.stack would raise, so return an empty tensor
        # with the correct trailing dimensions instead.
        empty = torch.empty((0, max_history_length, default_feat.shape[0]),
                            dtype=torch.float32)
        return user_ids, empty

    # Stack to a single ndarray first; torch.tensor on a nested list of
    # arrays is much slower.
    user_histories_np = np.stack(history_rows, axis=0)
    user_histories_tensor = torch.tensor(user_histories_np, dtype=torch.float32)

    return user_ids, user_histories_tensor


# create_history_tensor returns a (user_ids, tensor) tuple -- unpack it so
# the *_tensor names actually hold tensors (the original bound the whole
# tuple to them, which only "worked" because they were recomputed later).
train_user_ids, view_history_train_tensor = create_history_tensor(view_history_train, product_feat)
test_user_ids, view_history_test_tensor = create_history_tensor(view_history_test, product_feat)

# 3. Model definitions
# 3.1 CNN model

class CNNModel(nn.Module):
    """Two-layer convolutional feature extractor.

    Takes input of shape (batch, 1, input_dim, output_dim) and returns a
    (batch, output_dim) feature matrix.
    """

    def __init__(self, input_dim, output_dim):
        super(CNNModel, self).__init__()
        # Both 3x3 convolutions use same-padding, so the spatial size is
        # preserved and the flattened width is 32 * input_dim * output_dim.
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.fc = nn.Linear(32 * input_dim * output_dim, output_dim)

    def forward(self, x):
        hidden = torch.relu(self.conv1(x))
        hidden = torch.relu(self.conv2(hidden))
        # Flatten everything except the batch dimension for the linear layer.
        flat = hidden.reshape(hidden.size(0), -1)
        return self.fc(flat)
# 3.2 Transformer模型定义
class TransformerModel(nn.Module):
    """Transformer head mapping ``input_dim`` features to ``output_dim`` scores.

    The input is fed as both source and target of ``nn.Transformer``.
    NOTE(review): the positional arguments of ``nn.Transformer`` are
    ``(d_model, nhead, num_encoder_layers, ...)``, so ``num_layers`` only
    sets the encoder depth and the decoder keeps its default of 6 layers --
    confirm this is intended.
    """

    def __init__(self, input_dim, output_dim, nhead, num_layers):
        super(TransformerModel, self).__init__()
        self.transformer = nn.Transformer(input_dim, nhead, num_layers)
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # Self-attention with the input serving as both source and target.
        # (Debug prints from the original implementation removed.)
        x = self.transformer(x, x)

        # Collapse any sequence dimension so the linear layer always sees
        # a 2-D (batch, input_dim) matrix.
        if len(x.shape) > 2:
            x = x.mean(dim=1)

        x = self.fc(x)
        return x


# 3.3 Recommendation model
class RecModel(nn.Module):
    """Compose a CNN feature extractor with a transformer scoring head."""

    def __init__(self, cnn_model, transformer_model):
        super(RecModel, self).__init__()
        self.cnn = cnn_model
        self.transformer = transformer_model

    def forward(self, x):
        # Pipeline: raw history tensor -> CNN features -> transformer score.
        return self.transformer(self.cnn(x))
# 4. Model training
cnn_model = CNNModel(input_dim=10, output_dim=100).to(device)
transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
rec_model = RecModel(cnn_model, transformer_model).to(device)

optimizer = optim.Adam(rec_model.parameters(), lr=0.001)
criterion = nn.MSELoss()

# Keep only the history tensors; the user-id lists are not needed here.
_, view_history_train_tensor = create_history_tensor(view_history_train, product_feat)
_, view_history_test_tensor = create_history_tensor(view_history_test, product_feat)

for epoch in range(100):
    rec_model.train()
    optimizer.zero_grad()

    # Full-batch training: add a channel dimension for Conv2d,
    # giving shape (n_users, 1, 10, 100).
    batch_histories = view_history_train_tensor.unsqueeze(1).to(device)
    # RecModel already chains the CNN and the transformer, so call the
    # composed model instead of invoking the two sub-models by hand.
    scores = rec_model(batch_histories)

    # NOTE(review): every target is 1, so the loss only pushes sigmoid
    # outputs towards 1 -- there are no negative samples.  Confirm this
    # is the intended training signal.
    sim_values = torch.sigmoid(scores).squeeze()
    target_values = torch.ones_like(sim_values)

    loss = criterion(sim_values, target_values)
    loss.backward()
    optimizer.step()

    print(f'Epoch {epoch + 1}/100, Loss: {loss.item()}')

# 5. Model evaluation
rec_model.eval()

with torch.no_grad():
    test_histories = view_history_test_tensor.unsqueeze(1).to(device)
    sim_values_test = torch.sigmoid(rec_model(test_histories)).squeeze()

    # Move predictions to the CPU before handing them to sklearn, which
    # cannot consume CUDA tensors.
    preds = sim_values_test.round().cpu().numpy()
    labels = np.ones_like(preds)

    # NOTE(review): the ground truth is all ones, so recall/accuracy only
    # measure the fraction of predictions rounded to 1.
    recall = recall_score(labels, preds)
    accuracy = accuracy_score(labels, preds)

    print(f'Recall: {recall:.4f}, Accuracy: {accuracy:.4f}')

# 6. Save the model and generate result files
# NOTE(review): torch.save does not create directories -- the ../result
# folder must already exist.
torch.save(rec_model.state_dict(), '../result/RecModel.pth')

# Export the per-user similarity scores for the test split.
eval_results = pd.DataFrame({
    'user_id': view_history_test['user_id'],
    'sim_value': sim_values_test.cpu().numpy()
})

eval_results.to_csv(f'RecModelEval_{datetime.now().strftime("%Y%m%d")}.csv', index=False)

# Recommendation table: the same user ids paired with their scores.
recommendation_table = eval_results[['user_id', 'sim_value']]
recommendation_table.to_csv('rec.csv', index=False)
# Summary
# Pipeline: Word2Vec description embeddings -> per-user history tensors ->
# CNN + Transformer scoring -> training and evaluation -> CSV exports of
# the model weights and recommendation results.