import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
import pickle
from scipy.sparse import coo_matrix
import os


# Load the product catalogue.
products_df = pd.read_csv('data/products.csv')


# One label encoder per categorical product attribute:
# product_class1, product_class2, product_saletag, product_color,
# product_name, product_brand.
(le_class1, le_class2, le_saletag,
 le_color, le_name, le_brand) = (LabelEncoder() for _ in range(6))

# Pre-fitted user/product id encoders produced by an earlier pipeline step.
with open('data/user_encoder.pkl', 'rb') as f:
    le_user = pickle.load(f)

with open('data/product_encoder.pkl', 'rb') as f:
    le_product = pickle.load(f)


# Sanity check: show the distinct top-level categories.
unique_class1 = products_df['product_class1'].unique()
print("Unique class1 values:")
print(unique_class1)


# --- Product feature preprocessing ---------------------------------------
# Fill missing values BEFORE casting to str: .astype(str) converts NaN into
# the literal string 'nan', which made the original fillna() calls that
# followed it silent no-ops (only product_color had the correct order).
for _col, _fill, _enc in [
    ('product_class1', 'missing_class1', le_class1),
    ('product_class2', 'missing_class2', le_class2),
    ('product_saletag', 'missing_saletag', le_saletag),
    ('product_color', 'missing_color', le_color),
    ('product_name', 'missing_name', le_name),
    ('product_brand', 'missing_brand', le_brand),
]:
    products_df[_col] = _enc.fit_transform(
        products_df[_col].fillna(_fill).astype(str)
    )

# Numeric columns: nullable ints for year/season, float32 for price.
products_df['product_year'] = products_df['product_year'].astype('Int64')
products_df['product_season'] = products_df['product_season'].astype('Int64')
products_df['product_price'] = products_df['product_price'].astype('float32')

# Products removed because their price is missing.
removed_product_ids = products_df[products_df['product_price'].isna()]['product_id'].values
products_df = products_df.dropna(subset=['product_price'])

# Products removed because their price is non-positive.  Bug fix: this must
# be collected BEFORE the filter is applied — the original computed it after
# filtering, so the list was always empty and purchases of those products
# were never excluded downstream.
filtered_product_ids = removed_product_ids.tolist()
filtered_product_ids += products_df[products_df['product_price'] <= 0]['product_id'].values.tolist()
products_df = products_df[products_df['product_price'] > 0]

# Report product_ids that occur more than once (should normally be empty).
duplicate_product_ids_counts = products_df['product_id'].value_counts()[products_df['product_id'].value_counts() > 1]
print(duplicate_product_ids_counts)

# Rows were dropped above, so re-fit the product-id encoder on what remains.
# (.fit suffices — the original discarded the fit_transform return value.)
le_product.fit(products_df['product_id'])

# Process purchases.csv in chunks to bound memory usage.
chunksize = 10 ** 6  # one million rows per chunk
rows, cols, data = [], [], []

purchases_chunks = pd.read_csv('data/purchases.csv', chunksize=chunksize)


for chunk in purchases_chunks:

    # Keep the raw product ids so we can report what gets dropped below.
    original_product_ids = chunk['product_id'].values.copy()  # copy of the raw product_id column
    chunk = chunk[~chunk['product_id'].isin(filtered_product_ids)]

    # Product ids filtered out of this chunk (price was missing or <= 0).
    removed_ids = set(original_product_ids) - set(chunk['product_id'].values)

    if removed_ids:
        print(f"Removed product IDs from current chunk: {removed_ids}")

    # NOTE(review): transform() raises on ids unseen at fit time — this
    # presumes every purchase row has a known user and product; verify
    # against the purchases data.
    chunk['user_id'] = le_user.transform(chunk['user_id'])
    chunk['product_id'] = le_product.transform(chunk['product_id'])


    rows.extend(chunk['user_id'].values)
    cols.extend(chunk['product_id'].values)
    data.extend(chunk['product_amount'].values)

# Build the sparse user x item interaction matrix.  Duplicate (row, col)
# entries are summed when the COO matrix is later converted (e.g. to CSR).
interaction_matrix_sparse = coo_matrix(
    (data, (rows, cols)),
    shape=(len(le_user.classes_), len(le_product.classes_))
)


# Debug probe: check whether one specific product was ever purchased.
search_string = 'D1DE6096B'
if search_string in le_product.inverse_transform(cols):
    print(f"{search_string} is in the cols list.")
else:
    print(f"{search_string} is not in the cols list.")

# Product feature table indexed by product_id, used by RecommenderDataset.
products_df_fixed = products_df[['product_id', 'product_class1', 'product_class2', 'product_saletag', 'product_color', 'product_name', 'product_brand', 'product_year', 'product_season', 'product_price']].set_index('product_id')



class RecommenderDataset(Dataset):

    def pad_data(self, data, max_len, pad_value=-1):
        current_len = len(data)
        if current_len < max_len:
            padding = np.full((max_len - current_len, data.shape[1]), pad_value)
            data = np.vstack((data, padding))
        return data[:max_len]  # 确保返回的数据不会超过 max_len
    def __init__(self, interaction_matrix_sparse_input, product_info_input, num_pos_samples=1, num_neg_samples=4):
        # 将COO格式转换为CSR格式
        self.interaction_matrix_sparse = interaction_matrix_sparse_input.tocsr()
        self.user_ids = np.arange(self.interaction_matrix_sparse.shape[0])
        self.product_ids = np.arange(self.interaction_matrix_sparse.shape[1])
        self.product_info = product_info_input
        self.num_pos_samples = num_pos_samples
        self.num_neg_samples = num_neg_samples

        # 在转换为CSR格式后再进行下标操作
        self.user_item_dict = {user: set(self.interaction_matrix_sparse[user].indices) for user in self.user_ids}



    def __len__(self):
        return self.interaction_matrix_sparse.shape[0]

    def __getitem__(self, idx):
        user = self.user_ids[idx]
        pos_items_idx = self.interaction_matrix_sparse[user].indices
        pos_items_idx = np.random.choice(pos_items_idx, self.num_pos_samples, replace=True)
        # 去重
        # pos_items_idx = list(set(pos_items_idx))
        # 解码为原始产品 ID
        pos_items_ids = le_product.inverse_transform(pos_items_idx)

        # 获取产品年份
        pos_years = self.product_info.loc[pos_items_ids, 'product_year'].values

        # 生成负样本
        neg_items_ids = self.generate_negative_samples(user, pos_years)
        # print("负样本:", neg_items_ids)
        # print("class:",le_product.classes_)
        # print("未见的标签:", [item for item in neg_items_ids if item not in le_product.classes_])
        # if([item for item in neg_items_ids if item not in le_product.classes_]!= []):
        #     print(le_product.classes_)
        neg_items_idx = le_product.transform(neg_items_ids)

        # 获取正样本和负样本的信息
        pos_df = self.product_info.loc[pos_items_ids].reset_index()
        neg_df = self.product_info.loc[neg_items_ids].reset_index()
        pos_df['product_id'] = le_product.transform(pos_df['product_id'])
        neg_df['product_id'] = le_product.transform(neg_df['product_id'])
        pos_df['product_id_index'] = pos_df['product_id']
        pos_df.set_index('product_id_index', inplace=True)
        neg_df['product_id_index'] = neg_df['product_id']
        neg_df.set_index('product_id_index', inplace=True)
        # print('pos_df:',pos_df.dtypes)
        pos_array = np.array(pos_df.to_numpy(), dtype=np.float32)
        neg_array = np.array(neg_df.to_numpy(), dtype=np.float32)
        # print('pos_array:',pos_array.dtype)
        # 转换为PyTorch张量
        pos_item_info = torch.tensor(pos_array, dtype=torch.float32)
        neg_item_info = torch.tensor(neg_array, dtype=torch.float32)

        # 打印每个返回的张量的形状
        # print(f"Index: {idx}, Pos Items Shape: {pos_item_info.shape}, Neg Items Shape: {neg_item_info.shape}")

        return (torch.tensor(user, dtype=torch.long),
                torch.tensor(pos_items_idx, dtype=torch.long),
                torch.tensor(neg_items_idx, dtype=torch.long),
                pos_item_info,
                neg_item_info,
                )

    def generate_negative_samples(self, user, pos_years):
        neg_items = []
        # TODO 增加同类商品
        for year in pos_years:
            year_items = self.product_info[self.product_info['product_year'] == year].index.values
            neg_candidates = np.setdiff1d(year_items,  le_product.inverse_transform(list(self.user_item_dict[user])))
            if len(neg_candidates) > 0:
                neg_items.extend(np.random.choice(neg_candidates, self.num_neg_samples, replace=True))

        # 如果生成的负样本不足，补充随机负样本
        if len(neg_items) < self.num_neg_samples * self.num_pos_samples:
            additional_neg = np.random.choice(self.product_ids,
                                              self.num_neg_samples * self.num_pos_samples - len(neg_items),
                                              replace=True)
            neg_items = np.concatenate([neg_items, le_product.inverse_transform(additional_neg)])
            # print("负样本A:", neg_items)
        else:
            neg_items = np.random.choice(neg_items, self.num_neg_samples * self.num_pos_samples, replace=False)
            # print("负样本B:", neg_items)
        for item in neg_items:
            if item not in self.product_info.index.values:
                if item not in products_df_fixed.index.values:
                    print("未知的产品ID:", item)
                print(item, end='#')
        return neg_items


# Model definition
class DeepRecommender(nn.Module):
    """Two-tower recommender: a user embedding tower and an item tower that
    fuses the item id with its categorical and numeric side features."""

    def __init__(self, num_users, num_items, num_class1, num_class2, num_saletag,  num_colors, num_names, num_brands, embedding_dim):
        super().__init__()
        # Categorical features -> learned embedding tables.
        self.user_embedding = nn.Embedding(num_users, embedding_dim)
        self.item_embedding = nn.Embedding(num_items, embedding_dim)
        self.class1_embedding = nn.Embedding(num_class1, embedding_dim)
        self.class2_embedding = nn.Embedding(num_class2, embedding_dim)
        self.saletag_embedding = nn.Embedding(num_saletag, embedding_dim)
        self.color_embedding = nn.Embedding(num_colors, embedding_dim)
        self.name_embedding = nn.Embedding(num_names, embedding_dim)
        self.brand_embedding = nn.Embedding(num_brands, embedding_dim)
        # Numeric features -> linear projections into the same space.
        self.year_layer = nn.Linear(1, embedding_dim)
        self.season_layer = nn.Linear(1, embedding_dim)
        self.price_layer = nn.Linear(1, embedding_dim)
        # Fuse the ten per-item feature vectors into a single item vector.
        self.final_layer = nn.Linear(embedding_dim * 10, embedding_dim)

    def forward(self, user_id, item_id, class1, class2, saletag,  color, name, brand, year, season, price):
        """Return (user_vector, item_vector) for the given ids/features."""
        return (self.get_user_vector(user_id),
                self.get_item_vector(item_id, class1, class2, saletag, color, name, brand, year, season, price))

    def get_user_vector(self, user_id):
        """Look up the user embedding."""
        return self.user_embedding(user_id)

    def get_item_vector(self, item_id, class1, class2, saletag, color, name, brand, year, season, price):
        """Build the fused item representation.

        Categorical arguments are integer id tensors; year/season/price are
        expected to be float tensors whose trailing dimension is 1.
        Returns a tensor of shape (batch, num_items, embedding_dim).
        """
        features = [
            self.item_embedding(item_id),
            self.class1_embedding(class1),
            self.class2_embedding(class2),
            self.saletag_embedding(saletag),
            self.color_embedding(color),
            self.name_embedding(name),
            self.brand_embedding(brand),
            self.year_layer(year),
            self.season_layer(season),
            self.price_layer(price),
        ]

        # Promote any 1-D feature to 2-D so the concatenation lines up.
        features = [f.unsqueeze(1) if f.dim() == 1 else f for f in features]
        combined = torch.cat(features, dim=-1)

        # Normalize to a 3-D (batch, num_items, embedding_dim * 10) layout.
        if combined.dim() == 2:
            combined = combined.unsqueeze(0)

        batch_size, num_items, _ = combined.shape
        fused = self.final_layer(combined.view(batch_size * num_items, -1))
        return fused.view(batch_size, num_items, -1)


# Model training
def train_model(model, dataloader, optimizer, criterion, num_epochs=10):
    """Train ``model`` with a pointwise BCE objective: positive items are
    pushed toward score 1, sampled negatives toward 0.

    Each batch is (user_id, pos_items, neg_items, pos_info, neg_info).
    The *_info tensors have shape (batch, n_samples, 10) whose columns are
    [product_id, class1, class2, saletag, color, name, brand, year, season,
    price] — the order produced by RecommenderDataset.__getitem__.
    """
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0
        for user_id, pos_items, neg_items, pos_info, neg_info in dataloader:
            user_id = user_id.long()
            optimizer.zero_grad()
            user_vector = model.get_user_vector(user_id)

            # Item tower for the positives.  Price lives in column 9; the
            # original fed column 0 (the encoded product_id) to the price
            # layer — fixed below.
            pos_vector = model.get_item_vector(
                pos_items.long(),
                pos_info[:, :, 1].long(),   # class1
                pos_info[:, :, 2].long(),   # class2
                pos_info[:, :, 3].long(),   # saletag
                pos_info[:, :, 4].long(),   # color
                pos_info[:, :, 5].long(),   # name
                pos_info[:, :, 6].long(),   # brand
                pos_info[:, :, 7:8],        # year   (kept 3-D, trailing dim 1)
                pos_info[:, :, 8:9],        # season (kept 3-D, trailing dim 1)
                pos_info[:, :, 9:10]        # price  (bug fix: was column 0)
            )

            # Item tower for the negatives (cast to long like the positives).
            neg_vector = model.get_item_vector(
                neg_items.long(),
                neg_info[:, :, 1].long(),   # class1
                neg_info[:, :, 2].long(),   # class2
                neg_info[:, :, 3].long(),   # saletag
                neg_info[:, :, 4].long(),   # color
                neg_info[:, :, 5].long(),   # name
                neg_info[:, :, 6].long(),   # brand
                neg_info[:, :, 7:8],        # year
                neg_info[:, :, 8:9],        # season
                neg_info[:, :, 9:10]        # price  (bug fix: was column 0)
            )

            # Dot-product score between the user and each sampled item.
            pos_scores = (user_vector.unsqueeze(1) * pos_vector).sum(dim=2)
            neg_scores = (user_vector.unsqueeze(1) * neg_vector).sum(dim=2)

            pos_loss = criterion(pos_scores, torch.ones_like(pos_scores))
            neg_loss = criterion(neg_scores, torch.zeros_like(neg_scores))
            loss = pos_loss + neg_loss
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f"Epoch {epoch+1}/{num_epochs}, Loss: {total_loss/len(dataloader):.4f}")



# Recommendation
def get_recommendations_vector_based(model, user_id, item_info_list, top_n=10):
    """Score every item in ``item_info_list`` for ``user_id`` and return the
    ``top_n`` product_ids by dot-product similarity.

    Each item dict must carry the already label-encoded categorical fields
    (product_class1/2, product_saletag, product_color, product_name,
    product_brand) plus product_year, product_season and product_price.
    Items that fail to encode or score are skipped with a diagnostic.
    """
    user_vector = model.get_user_vector(torch.tensor([user_id]))

    item_vectors = []
    valid_items = []

    # Highest index currently known to the product encoder.
    max_index = len(le_product.classes_) - 1

    for item_info in item_info_list:
        try:
            if item_info['product_id'] not in le_product.classes_:
                # Unseen product: extend the encoder with a fresh index.
                # NOTE(review): the model's item_embedding was sized at
                # training time, so indices beyond it raise (caught below);
                # a resized/retrained embedding is needed for new items.
                max_index += 1
                le_product.classes_ = np.append(le_product.classes_, item_info['product_id'])
                item_encoded = max_index
            else:
                item_encoded = le_product.transform([item_info['product_id']])[0]

            item_vector = model.get_item_vector(
                torch.tensor([item_encoded]),
                torch.tensor([item_info['product_class1']]),
                torch.tensor([item_info['product_class2']]),
                torch.tensor([item_info['product_saletag']]),
                torch.tensor([item_info['product_color']]),
                torch.tensor([item_info['product_name']]),
                torch.tensor([item_info['product_brand']]),
                # Numeric features must be float with a trailing dim of 1 so
                # the model's Linear(1, d) layers accept them; the original
                # passed 1-D integer tensors here, unlike price below.
                torch.tensor([[item_info['product_year']]]).float(),
                # Bug fix: the column is 'product_season' (no trailing 's').
                torch.tensor([[item_info['product_season']]]).float(),
                torch.tensor([[item_info['product_price']]]).float(),
            )
            item_vectors.append(item_vector)
            valid_items.append(item_info)
        except Exception as e:
            # Bug fix: the records carry 'product_id', not 'id' — the old
            # handler raised KeyError itself while reporting the error.
            print(f"Error processing item {item_info.get('product_id')}: {e}")

    if not item_vectors:
        print("No valid items to recommend.")
        return []

    # Concatenate item vectors along the batch dimension.
    item_vectors = torch.cat(item_vectors, dim=0)

    # Collapse to 2-D for the similarity matmul.
    user_vector = user_vector.squeeze(0)    # (1, d) -> (d,)
    item_vectors = item_vectors.squeeze(1)  # (n, 1, d) -> (n, d)

    similarities = torch.matmul(user_vector, item_vectors.t()).detach().cpu().numpy()

    top_indices = similarities.argsort()[-top_n:][::-1]
    return [valid_items[i]['product_id'] for i in top_indices]



# --- Wiring: dataset, model, training, and a sample recommendation -------
dataset = RecommenderDataset(interaction_matrix_sparse, products_df_fixed, num_pos_samples=1, num_neg_samples=4)

dataloader = DataLoader(dataset, batch_size=32)

print("le_class1.classes_ length:",len(le_class1.classes_))
# Instantiate the model; vocabulary sizes come from the fitted encoders.
model = DeepRecommender(
    num_users=len(le_user.classes_),
    num_items=len(le_product.classes_),
    num_class1=len(le_class1.classes_),
    num_class2=len(le_class2.classes_),
    num_saletag=len(le_saletag.classes_),
    num_colors=len(le_color.classes_),
    num_names=len(le_name.classes_),
    num_brands=len(le_brand.classes_),
    embedding_dim=32
)

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.BCEWithLogitsLoss()

# Train the model.
train_model(model, dataloader, optimizer, criterion, num_epochs=10)

# Produce recommendations for one example user.
user_id = 0  # example user id
item_info_list = products_df.to_dict('records')
recommendations = get_recommendations_vector_based(model, user_id, item_info_list, top_n=10)
print("Recommended items:", recommendations)

