import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from gensim.models import Word2Vec
from sklearn.metrics import accuracy_score, recall_score
from datetime import datetime

# Select the compute device (GPU when available).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 2. Data preprocessing
# 2.1 Load the raw CSVs.
# NOTE(review): expects the files in the working directory; 'train_product'
# must provide 'product_id' and 'description' columns (used below).
train_product = pd.read_csv('train_product.csv')
view_history_train = pd.read_csv('view_history_train.csv')
view_history_test = pd.read_csv('view_history_test.csv')

# 2.2 Turn each product description into a feature vector with Word2Vec.
# Train the Word2Vec model on whitespace-tokenized descriptions.
descriptions = train_product['description'].apply(lambda x: x.split())
w2v_model = Word2Vec(descriptions, vector_size=100, window=5, min_count=1, workers=4)

# Build a product_id -> mean word vector (100-dim) lookup table.
# min_count=1 above puts every training token in the vocabulary, so the
# in-vocab filter only matters for an empty description — np.mean over an
# empty list would yield NaN; presumably descriptions are non-empty (verify).
product_feat = {}
for _, row in train_product.iterrows():
    product_id = row['product_id']
    description = row['description'].split()
    product_feat[product_id] = np.mean([w2v_model.wv[word] for word in description if word in w2v_model.wv], axis=0)

product_feat_tensor = torch.tensor([product_feat[pid] for pid in train_product['product_id']], dtype=torch.float32)

# 2.3 Build the interleaved user history/dislike tensor
def create_history_tensor(view_history, product_feat, max_length=10):
    """Build an interleaved history/dislike feature tensor for all users.

    Args:
        view_history: DataFrame with 'user_id', 'history' and 'dislike'
            columns; the latter two are comma-separated product-id strings.
        product_feat: dict mapping product_id -> 1-D feature vector (all
            vectors share one shape); unknown ids fall back to a zero vector.
        max_length: products kept per list; shorter lists are zero-padded,
            longer ones truncated.

    Returns:
        (user_ids, tensor) where tensor has shape
        (2 * num_users, 1, max_length, feat_dim) and rows alternate per
        user: even rows are the viewing history, odd rows the dislikes.
    """
    default_feat = np.zeros_like(next(iter(product_feat.values())))

    def _featurize(product_ids):
        # Truncate to max_length, look up features, then zero-pad to a fixed
        # length so np.stack below never sees ragged rows (the unpadded
        # version crashed whenever users had lists of different lengths).
        feats = [product_feat.get(pid, default_feat) for pid in product_ids[:max_length]]
        feats.extend([default_feat] * (max_length - len(feats)))
        return feats

    user_ids = []
    interleaved_rows = []
    for _, row in view_history.iterrows():
        user_ids.append(row['user_id'])
        interleaved_rows.append(_featurize(row['history'].split(',')))
        interleaved_rows.append(_featurize(row['dislike'].split(',')))

    stacked = np.stack(interleaved_rows, axis=0)
    stacked = np.expand_dims(stacked, axis=1)  # add a channel dim for Conv2d
    return user_ids, torch.tensor(stacked, dtype=torch.float32)

def create_dislike_tensor(view_history, product_feat, max_length=10):
    """Build a (num_users, 1, max_length, feat_dim) tensor of dislike features.

    Each user's comma-separated 'dislike' ids are looked up in product_feat
    (unknown ids map to a zero vector), truncated to max_length and
    zero-padded to a fixed length.
    """
    zero_vec = np.zeros_like(next(iter(product_feat.values())))
    user_ids, feature_rows = [], []

    for _, record in view_history.iterrows():
        dislike_ids = record['dislike'].split(',')[:max_length]
        feats = [product_feat.get(pid, zero_vec) for pid in dislike_ids]
        feats += [zero_vec] * (max_length - len(feats))

        user_ids.append(record['user_id'])
        feature_rows.append(feats)

    # Stack into (num_users, max_length, feat_dim), then insert a channel dim.
    stacked = np.expand_dims(np.stack(feature_rows, axis=0), axis=1)
    return user_ids, torch.tensor(stacked, dtype=torch.float32)

# Build the interleaved history/dislike tensors (rows alternate per user:
# even = viewing history, odd = dislikes). The user-id lists are discarded.
_, view_history_dislike_train_tensor = create_history_tensor(view_history_train, product_feat)

_, view_history_dislike_test_tensor = create_history_tensor(view_history_test, product_feat)


# 3. Model definitions
# 3.1 CNN feature extractor
class CNNModel(nn.Module):
    """Two 3x3 conv layers plus a linear head over a (batch, 1, input_dim, output_dim) map.

    padding=1 preserves the spatial size through both convolutions, so the
    flattened feature count is exactly 32 * input_dim * output_dim.
    """

    def __init__(self, input_dim, output_dim):
        super(CNNModel, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.fc = nn.Linear(32 * input_dim * output_dim, output_dim)

    def forward(self, x):
        # Conv stack with ReLU activations, then flatten and project.
        hidden = torch.relu(self.conv1(x))
        hidden = torch.relu(self.conv2(hidden))
        flattened = hidden.view(hidden.size(0), -1)
        return self.fc(flattened)

# 3.2 Transformer model definition
class TransformerModel(nn.Module):
    """Transformer scoring head: self-transforms the input, then maps to output_dim.

    NOTE(review): nn.Transformer's third positional argument is
    num_encoder_layers, so `num_layers` here only sets the encoder depth;
    the decoder silently keeps its default of 6 layers — confirm intended.
    """

    def __init__(self, input_dim, output_dim, nhead, num_layers):
        super(TransformerModel, self).__init__()
        # d_model = input_dim; it must be divisible by nhead.
        self.transformer = nn.Transformer(input_dim, nhead, num_layers)
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # The same tensor is used as both source and target sequence.
        # NOTE(review): nn.Transformer defaults to batch_first=False, so a
        # 3-D input is read as (seq, batch, feature) — confirm caller layout.
        x = self.transformer(x, x)
        if len(x.shape) > 2:
            # Pool over dim 1 when the output is still sequence-shaped.
            x = x.mean(dim=1)
        x = self.fc(x)
        return x

# 3.3 Recommendation model definition
class RecModel(nn.Module):
    """Recommendation pipeline: CNN feature extractor followed by a Transformer scorer."""

    def __init__(self, cnn_model, transformer_model):
        super(RecModel, self).__init__()
        self.cnn = cnn_model
        self.transformer = transformer_model

    def forward(self, x):
        # Chain the two sub-models: CNN first, then the Transformer head.
        return self.transformer(self.cnn(x))

# 4. Model training
# NOTE(review): input_dim=10 must equal create_history_tensor's max_length
# and output_dim=100 the Word2Vec vector_size — confirm if either changes.
cnn_model = CNNModel(input_dim=10, output_dim=100).to(device)
transformer_model = TransformerModel(input_dim=100, output_dim=1, nhead=5, num_layers=2).to(device)
rec_model = RecModel(cnn_model, transformer_model).to(device)

optimizer = optim.Adam(rec_model.parameters(), lr=0.001)
# BCEWithLogitsLoss applies sigmoid internally, so it must be fed raw logits.
criterion = nn.BCEWithLogitsLoss()

# Single full-batch training pass over the interleaved train tensor.
num_epochs = 1
for epoch in range(num_epochs):
    rec_model.train()
    optimizer.zero_grad()

    # Shape (2 * num_users, 1, max_length, feat_dim); rows alternate per
    # user: even rows = viewing history (positive), odd rows = dislikes
    # (negative) — see create_history_tensor.
    batch_feats = view_history_dislike_train_tensor.to(device)

    # Raw scores from the CNN -> Transformer pipeline. Keep them as logits:
    # BCEWithLogitsLoss applies the sigmoid itself (the previous code passed
    # sigmoid outputs in, effectively applying sigmoid twice).
    logits = rec_model(batch_feats).squeeze()

    # Interleaved labels: 1 for history rows, 0 for dislike rows. The old
    # code concatenated ones and zeros into a target twice the batch size,
    # making the loss call fail on a shape mismatch, and then referenced an
    # undefined name (`positive_output_values`).
    target_values = torch.zeros_like(logits)
    target_values[0::2] = 1.0

    loss = criterion(logits, target_values)
    loss.backward()
    optimizer.step()

    # Log with the real epoch count instead of the hard-coded "/100".
    print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item()}')

# 5. Model evaluation
rec_model.eval()

with torch.no_grad():
    # Interleaved test tensor from create_history_tensor: even rows are a
    # user's viewing history (positive), odd rows the same user's dislikes
    # (negative). The previous code referenced view_history_test_tensor /
    # view_dislike_test_tensor, neither of which was ever defined.
    test_feats = view_history_dislike_test_tensor.to(device)

    # Raw logits from the CNN -> Transformer pipeline, then a single sigmoid.
    logits_test = rec_model(test_feats).squeeze()
    probs_test = torch.sigmoid(logits_test)

    # Split back into per-user positive/negative probabilities; these names
    # are reused by the result-export section below.
    positive_output_values_test = probs_test[0::2]
    negative_output_values_test = probs_test[1::2]
    output_values_test = torch.cat([positive_output_values_test, negative_output_values_test], dim=0)

    target_values_test = torch.cat([
        torch.ones_like(positive_output_values_test),
        torch.zeros_like(negative_output_values_test),
    ], dim=0)

    # output_values_test already holds probabilities, so round directly.
    # The old code applied sigmoid a second time, squeezing every value into
    # (0.5, 0.73) and thus rounding nearly every prediction to class 1.
    predictions = output_values_test.round().cpu()
    recall = recall_score(target_values_test.cpu(), predictions)
    accuracy = accuracy_score(target_values_test.cpu(), predictions)

    print(f'Recall: {recall:.4f}, Accuracy: {accuracy:.4f}')

# 6. Save the trained model weights
# NOTE(review): torch.save does not create parent directories — confirm
# '../result' exists before running, or create it first.
torch.save(rec_model.state_dict(), '../result/RecModel.pth')

# 7. Write the per-user evaluation record file
batch_date = datetime.now().strftime("%Y%m%d")

# One row per test user: predicted probability for the history (positive)
# and dislike (negative) samples, plus the run date.
# NOTE(review): assumes the positive/negative tensors are the same length as
# view_history_test['user_id'] — verify against the evaluation section.
eval_results = pd.DataFrame({
    'user_id': view_history_test['user_id'],
    'positive_value': positive_output_values_test.cpu().numpy(),
    'negative_value': negative_output_values_test.cpu().numpy(),
    'batch_date': batch_date
})

eval_results.to_csv(f'RecModelEval.csv', index=False)

