import torch
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from transformers import BertTokenizer, BertModel
from tqdm import tqdm
# Set the random seed so results are reproducible across runs.
torch.manual_seed(23)

# Load the BERT model and tokenizer from a local checkpoint directory.
# Raw string: the original literal relied on "\w" being passed through,
# which is an invalid escape sequence (SyntaxWarning on Python 3.12+,
# slated to become an error). The raw string has the identical value.
model_name = r"D:\wmm\large_model\bert"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)

# Load the MovieLens-style datasets (movies.csv provides movieId/genres).
movies_df = pd.read_csv('data/movies.csv')
ratings_df = pd.read_csv('data/ratings.csv')

# Generate an embedding for every genre string in movies_df['genres']


def generate_embeddings(text):
    """Embed *text* with BERT and return the mean-pooled hidden states.

    The input is tokenized (padded/truncated to the model's limit) and fed
    through the module-level ``model``; the token embeddings of the last
    layer are averaged over the sequence dimension.

    Args:
        text: A string (or list of strings) to embed.

    Returns:
        A numpy array of shape (batch, hidden_size) — hidden_size is 768
        for bert-base checkpoints (TODO confirm for this local model).
    """
    inputs = tokenizer(text, return_tensors="pt",
                       padding=True, truncation=True)
    # no_grad() already prevents graph construction, so the original
    # .detach() call was redundant and has been dropped.
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.mean(dim=1).numpy()


# Embed each movie's genre string. Identical genre combinations repeat
# across many movies, so cache per unique string and encode each one only
# once — the model is deterministic under no_grad, so the stacked result
# is identical to embedding every row independently. tqdm (imported at
# the top of the file, previously unused) reports progress.
_embedding_cache = {}
_rows = []
for genre in tqdm(movies_df['genres'], desc="Embedding genres"):
    if genre not in _embedding_cache:
        _embedding_cache[genre] = generate_embeddings(genre)
    _rows.append(_embedding_cache[genre])
genre_embeddings = np.vstack(_rows)

# Reduce the BERT embeddings (768-dim for bert-base — TODO confirm) to a
# 128-dimensional feature space with PCA.
pca = PCA(n_components=128)
reduced_genre_embeddings = pca.fit_transform(genre_embeddings)

# 将电影ID和类型信息与BERT嵌入特征合并
combined_features = pd.DataFrame(reduced_genre_embeddings)
combined_features.insert(0, 'Genres', movies_df['genres'])
combined_features.insert(0, 'MovieID', movies_df['movieId'])

combined_features.to_csv(
    'D:\wmm\deeplearning\课设推荐系统\data/feature.csv', index=False)
