import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
import torch.optim as optim
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, GATConv
import matplotlib.pyplot as plt
import os
from collections import Counter
from tqdm import tqdm
from sklearn.decomposition import PCA
from transformers import BertTokenizer, BertModel
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support

# Read the movie and rating data
movies_df = pd.read_csv('ml-25m/movies.csv')
ratings_df = pd.read_csv('ml-25m/ratings.csv')

# Keep only ratings of 4 stars or higher
high_rated = ratings_df[ratings_df['rating'] >= 4]

# Collect, per user, the list of movies they rated highly
user_high_rated_movies = high_rated.groupby('userId')['movieId'].apply(list)

# PERF FIX: build a movieId -> genres lookup once, instead of filtering the
# whole movies DataFrame for every (user, movie) pair, which was
# O(users * ratings * movies) and dominated the runtime.
movie_genres = dict(zip(movies_df['movieId'], movies_df['genres']))

# Determine each user's single most frequent genre among high-rated movies
user_favorite_genres = {}

for user_id in tqdm(ratings_df['userId'].unique()):
    genres_list = []
    for movie_id in user_high_rated_movies.get(user_id, []):
        genres = movie_genres.get(movie_id)
        if genres is not None:
            genres_list.extend(genres.split('|'))

    if not genres_list:
        user_favorite_genres[user_id] = "No Preference"
    else:
        # Counter.most_common breaks ties by insertion order; genres_list is
        # built in the same order as before, so results match the original.
        user_favorite_genres[user_id] = Counter(genres_list).most_common(1)[0][0]

# Write one descriptive sentence per user to CSV
output_data = []

for user_id, favorite_genre in tqdm(user_favorite_genres.items()):
    sentence = f"The user likes {favorite_genre} genre movies"
    output_data.append(
        {"userId": user_id, "Favorite_Genre_Sentence": sentence})

output_df = pd.DataFrame(output_data)
output_csv_path = 'user_favorite_genres_new.csv'
output_df.to_csv(output_csv_path, index=False)

# Seed the RNG so results are reproducible
torch.manual_seed(23)

# Load the local BERT model and tokenizer.
# FIX: the original literal "D:\wmm\large_model\\bert" relied on the invalid
# escape sequences \w and \l staying literal (a SyntaxWarning on Python 3.12+).
# A raw string yields the exact same path value without the warning.
model_name = r"D:\wmm\large_model\bert"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)

# Load the dataset tables used for feature generation
ratings_df = pd.read_csv('data/ratings.csv')
movies_df = pd.read_csv('data/movies.csv')

# Generate a BERT embedding for each genre string in movies_df['genres']


def generate_embeddings(text):
    """Return the mean-pooled last-hidden-state BERT embedding for ``text``.

    Uses the module-level ``tokenizer`` and ``model``; output is a numpy
    array of shape (1, hidden_size).
    """
    encoded = tokenizer(text, return_tensors="pt",
                        padding=True, truncation=True)
    with torch.no_grad():
        bert_output = model(**encoded)
    pooled = bert_output.last_hidden_state.mean(dim=1)
    return pooled.detach().numpy()


# PERF FIX: many movies share the same genres string, so embed each *unique*
# string once and reuse it — identical output, far fewer BERT forward passes.
unique_genre_strings = movies_df['genres'].unique()
genre_to_embedding = {g: generate_embeddings(g) for g in unique_genre_strings}
genre_embeddings = np.vstack([genre_to_embedding[genre]
                              for genre in movies_df['genres']])

# Reduce the BERT embeddings with PCA
pca = PCA(n_components=128)  # target dimensionality is 128
reduced_genre_embeddings = pca.fit_transform(genre_embeddings)

# Combine movie id and genres with the reduced embedding features
combined_features = pd.DataFrame(reduced_genre_embeddings)
combined_features.insert(0, 'Genres', movies_df['genres'])
combined_features.insert(0, 'MovieID', movies_df['movieId'])

# FIX: raw string — the original path relied on the invalid escape sequences
# \w and \d staying literal (a SyntaxWarning on Python 3.12+); the value is
# unchanged.
combined_features.to_csv(
    r'D:\wmm\deeplearning\课设推荐系统\data/feature.csv', index=False)

model_save_path = 'my_model.pth'
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
torch.manual_seed(23)
torch.manual_seed(23)

# Load datasets for graph construction
movies_df = pd.read_csv('data/movies.csv')
ratings_df = pd.read_csv('data/ratings.csv')

# Discretize ratings into integers for the five-class problem
ratings_df['rating'] = ratings_df['rating'].apply(round).astype(int)

# Node indexing: movie nodes first, user nodes appended after them.
# (The original built movie_node_indices twice identically; once suffices.)
movie_node_indices = {movie_id: i for i,
                      movie_id in enumerate(movies_df['movieId'])}
user_node_indices = {user_id: i + len(movie_node_indices)
                     for i, user_id in enumerate(ratings_df['userId'].unique())}

df_user = pd.read_csv('data/user_feature.csv')
df = pd.read_csv('data/feature.csv')

# Node features: the 128 PCA-reduced embedding columns, movies stacked above users
feature_columns = [str(i) for i in range(128)]
movie_features = df[feature_columns].values
user_features = df_user[feature_columns].values
node_features = np.vstack([movie_features, user_features])

# One (user -> movie) edge per rating row. zip over the two columns avoids
# the per-row Series construction overhead of iterrows.
edge_indices = []
for user_id, movie_id in tqdm(zip(ratings_df['userId'], ratings_df['movieId']),
                              total=len(ratings_df)):
    edge_indices.append([user_node_indices[user_id],
                         movie_node_indices[movie_id]])

edge_index = torch.tensor(edge_indices, dtype=torch.long).t().contiguous()

data = Data(x=torch.tensor(node_features, dtype=torch.float),
            edge_index=edge_index)

# Shift ratings to 0-4 integer class labels for cross-entropy
data.y = torch.tensor(ratings_df['rating'].values - 1, dtype=torch.long)

# Train/test split.
# BUG FIX: the original drew two *independent* random permutations for the
# train and the test indices, so the two sets could overlap (train/test
# leakage) and some samples could land in neither set. Draw ONE permutation
# and split it at the 70% mark.
non_zero_mask = (data.y >= 0)
non_zero_indices = non_zero_mask.nonzero(as_tuple=True)[0]
perm = torch.randperm(len(non_zero_indices))
split_point = int(0.7 * len(non_zero_indices))
train_indices = perm[:split_point]
test_indices = perm[split_point:]

train_mask = torch.zeros(len(ratings_df), dtype=torch.bool)
test_mask = torch.zeros(len(ratings_df), dtype=torch.bool)
train_mask[non_zero_indices[train_indices]] = True
test_mask[non_zero_indices[test_indices]] = True

data.train_mask = train_mask
data.test_mask = test_mask

# Model architecture adapted for the five-class rating problem


class GCN(torch.nn.Module):
    """Four GAT layers plus an edge-level linear head for 5 rating classes.

    Every GATConv uses 2 attention heads, so each layer's effective output
    width is out_channels * 2; per edge, the two endpoint embeddings are
    concatenated (8 * hidden_channels wide) before classification.
    """

    def __init__(self, num_features, hidden_channels):
        super(GCN, self).__init__()
        heads = 2
        self.conv1 = GATConv(num_features, hidden_channels, heads)
        self.conv2 = GATConv(hidden_channels * 2, hidden_channels * 2, heads)
        self.conv3 = GATConv(hidden_channels * 4, hidden_channels * 2, heads)
        self.conv4 = GATConv(hidden_channels * 4, hidden_channels * 2, heads)
        # conv4 emits hidden*2 per head -> hidden*4 per node; two concatenated
        # endpoint embeddings give 8 * hidden_channels input features.
        self.fc = torch.nn.Linear(8 * hidden_channels, 5)  # 5 output classes

    def forward(self, x, edge_index):
        # Run the four attention layers, each followed by ReLU
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.relu(conv(x, edge_index))
        # Score every edge from its concatenated endpoint embeddings
        src_nodes = edge_index[0, :]
        dst_nodes = edge_index[1, :]
        edge_features = torch.cat([x[src_nodes], x[dst_nodes]], dim=1)
        return self.fc(edge_features)


# Instantiate the edge-classification model: 128-d node features in,
# 64 hidden channels (widths scale with the 2 attention heads inside GCN).
model = GCN(num_features=128, hidden_channels=64)

# Adam optimizer over all model parameters; lr=0.01, no weight decay.
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Compute evaluation metrics


def compute_metrics(output, target):
    """Return (confusion_matrix, macro precision, macro recall, macro F1).

    output: class logits of shape (N, 5); target: integer labels in [0, 4].
    """
    class_labels = [0, 1, 2, 3, 4]
    y_true = target.cpu().numpy()
    y_pred = output.argmax(dim=1).cpu().numpy()

    conf_matrix = confusion_matrix(y_true, y_pred, labels=class_labels)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true,
        y_pred,
        labels=class_labels,
        average='macro',
        zero_division=0
    )
    return conf_matrix, precision, recall, f1


# Per-epoch histories of the evaluation metrics
train_precision = []
train_recall = []
train_f1 = []
test_precision = []
test_recall = []
test_f1 = []
epochs = 200

# Training loop
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()

    model.eval()
    with torch.no_grad():
        out = model(data.x, data.edge_index)
        # BUG FIX: the original appended every metric with a flat "+ 0.2",
        # artificially inflating the reported precision/recall/F1 curves.
        # Record the true metric values instead.
        _, train_p, train_r, train_f = compute_metrics(
            out[data.train_mask], data.y[data.train_mask])
        train_precision.append(train_p)
        train_recall.append(train_r)
        train_f1.append(train_f)

        _, test_p, test_r, test_f = compute_metrics(
            out[data.test_mask], data.y[data.test_mask])
        test_precision.append(test_p)
        test_recall.append(test_r)
        test_f1.append(test_f)

        print(
            f'Epoch {epoch + 1}: Train Loss: {loss.item()}, Precision: {train_p:.4f}, Recall: {train_r:.4f}, F1: {train_f:.4f}')

# Plot the precision / recall / F1 curves for train and test.
# One panel definition per metric keeps the three subplots in sync.
metric_panels = [
    (train_precision, test_precision,
     'Train Precision', 'Test Precision', 'Precision', 'Precision over Epochs'),
    (train_recall, test_recall,
     'Train Recall', 'Test Recall', 'Recall', 'Recall over Epochs'),
    (train_f1, test_f1,
     'Train F1', 'Test F1', 'F1 Score', 'F1 Score over Epochs'),
]

plt.figure(figsize=(18, 4))
for panel_no, (train_vals, test_vals, train_lbl, test_lbl,
               y_label, panel_title) in enumerate(metric_panels, start=1):
    plt.subplot(1, 3, panel_no)
    plt.plot(train_vals, label=train_lbl)
    plt.plot(test_vals, label=test_lbl, color='orange')
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    plt.title(panel_title)
    plt.legend()

plt.tight_layout()
plt.show()

# Persist the trained weights
torch.save(model.state_dict(), model_save_path)

# Per-user movie recommendation function


def predict_top10_movies_for_user(model, user_id, movie_node_indices, user_node_indices, data):
    """Return up to 10 recommended movie ids for ``user_id``.

    Scores every edge incident to the user's node by its expected rating
    (softmax-weighted average of the classes 1..5) and returns the movie ids
    of the best-scoring edges, highest first. Returns [] for unknown users
    or users with no edges.
    """
    model.eval()
    with torch.no_grad():
        user_idx = user_node_indices.get(user_id, None)
        if user_idx is None:
            print("用户ID不存在。")
            return []

        predictions = model(data.x, data.edge_index)
        user_edges = (data.edge_index[0] == user_idx).nonzero(as_tuple=True)[0]
        if user_edges.numel() == 0:
            return []
        movie_indices = data.edge_index[1][user_edges]

        # BUG FIX: the model emits one score row per *edge*, so this user's
        # rows are predictions[user_edges]. The original indexed by movie
        # *node* index (predictions[movie_indices]), reading rows that belong
        # to unrelated edges.
        edge_logits = predictions[user_edges]

        # Expected rating = probability-weighted average of classes 1..5.
        # FIX: weight softmax probabilities rather than raw logits, whose
        # weighted sum has no rating interpretation.
        class_values = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
        weighted_scores = (F.softmax(edge_logits, dim=1) * class_values).sum(dim=1)

        # FIX: topk raises if the user has fewer than 10 edges; clamp k.
        k = min(10, weighted_scores.numel())
        _, top_positions = torch.topk(weighted_scores, k)

        # FIX: O(1) dict reverse lookup instead of list(...).index(...) per
        # result, which was O(n_movies) each and compared tensors to ints.
        index_to_movie_id = {idx: mid for mid, idx in movie_node_indices.items()}
        return [index_to_movie_id[movie_indices[i].item()] for i in top_positions]


# Accumulate each user's recommendation result
user_recommendations = []

# Produce recommendations for user ids 1 through 99
for user_id in range(1, 100):
    recs = predict_top10_movies_for_user(
        model, user_id, movie_node_indices, user_node_indices, data)
    user_recommendations.append(
        {'user_id': user_id, 'top10_movie_ids': recs})
    print("Top 10 movie recommendations for user {}: {}".format(
        user_id, recs))

# Convert to a DataFrame and save as CSV
df_recommendations = pd.DataFrame(user_recommendations)
csv_file_path = 'user_recommendations.csv'
df_recommendations.to_csv(csv_file_path, index=False)

print("推荐结果已保存到CSV文件: {}".format(csv_file_path))
