import pandas as pd
import torch
import torch_geometric
from torch_geometric.data import HeteroData
import torch_geometric.transforms as T
from torch_geometric.nn import SAGEConv, to_hetero
from torch import Tensor
import tqdm
import torch.nn.functional as F

# Load the entire movie data frame into memory:
# The movies dataset has three columns: movieId, title, genres.
movies_df = pd.read_csv('./ml-latest-small/movies.csv', index_col='movieId')

# Take the 'genres' column, split each entry on '|' and one-hot encode it.
genres = movies_df['genres'].str.get_dummies('|')
print(genres[["Action", "Adventure", "Drama", "Horror"]].head())
# Use genres as movie input features:
# Convert the numpy array to a float tensor.
movie_feat = torch.from_numpy(genres.values).to(torch.float)
# Sanity check: expect 9742 movies x 20 genres (fails if the dataset changes).
assert movie_feat.size() == (9742, 20)  # 20 genres in total.

# Load the ratings: columns userId, movieId, rating, timestamp.
ratings_df = pd.read_csv('./ml-latest-small/ratings.csv')

# Raw user IDs start at 1; remap them onto a consecutive range starting at 0.
unique_user_id = ratings_df['userId'].unique()
unique_user_id = pd.DataFrame(data={
    'userId': unique_user_id,
    # RangeIndex yields 0, 1, ..., len(unique_user_id) - 1.
    'mappedID': pd.RangeIndex(len(unique_user_id)),
})
print("Mapping of user IDs to consecutive values:")
print("==========================================")
print(unique_user_id.head())
print()
# Same remapping for movie IDs, onto [0, num_movie_nodes):
unique_movie_id = ratings_df['movieId'].unique()
unique_movie_id = pd.DataFrame(data={
    'movieId': unique_movie_id,
    'mappedID': pd.RangeIndex(len(unique_movie_id)),
})
print("Mapping of movie IDs to consecutive values:")
print("===========================================")
print(unique_movie_id.head())
# Left-join: keep every row of ratings_df; where unique_user_id has a matching
# 'userId' key, its 'mappedID' is attached (a non-match would yield NaN).
# Joining on 'userId' in both frames attaches the 0-based mapped ID per rating.
ratings_user_id = pd.merge(ratings_df['userId'], unique_user_id,
                            left_on='userId', right_on='userId', how='left')
# Keep only the remapped (0-based) user IDs, as a tensor.
ratings_user_id = torch.from_numpy(ratings_user_id['mappedID'].values)
# Same procedure for the movie side.
ratings_movie_id = pd.merge(ratings_df['movieId'], unique_movie_id,
                            left_on='movieId', right_on='movieId', how='left')
ratings_movie_id = torch.from_numpy(ratings_movie_id['mappedID'].values)
# With this, we are ready to construct our `edge_index` in COO format
# following PyG semantics:
# Row 0 holds user indices, row 1 the corresponding movie indices.
edge_index_user_to_movie = torch.stack([ratings_user_id, ratings_movie_id], dim=0)
assert edge_index_user_to_movie.size() == (2, 100836)
print()
print("Final edge indices pointing from users to movies:")
print("=================================================")
print(edge_index_user_to_movie)

# Build the heterogeneous graph.
data = HeteroData()
# Two node types: 'user' and 'movie'.
data["user"].node_id = torch.arange(len(unique_user_id))
data["movie"].node_id = torch.arange(len(movies_df))
# data["movie"].x holds the movie features (one-hot genre encoding).
data["movie"].x = movie_feat
# Users have no input features here (only IDs); a learnable embedding
# supplies their representations later in the model.
# The triple key declares the edge type: 'rates' edges from user to movie.
data["user", "rates", "movie"].edge_index = edge_index_user_to_movie
# Add the reverse edge type so messages can flow in both directions.
data = T.ToUndirected()(data)

# Randomly split edges into train (message + supervision), val and test sets.
transform = T.RandomLinkSplit(
    # Fraction of edges held out for validation.
    num_val=0.1,
    # Fraction of edges held out for testing.
    num_test=0.1,
    # Share of training edges used purely for supervision; with 0.3,
    # 70% of the training edges remain message-passing edges.
    disjoint_train_ratio=0.3,
    # Negative-to-positive sampling ratio (1 positive : 2 negatives).
    neg_sampling_ratio=2.0,
    # Do not pre-generate negatives for the training split; sampling them
    # on the fly during training tends to work better.
    add_negative_train_samples=False,
    # The edge type being split.
    edge_types=("user", "rates", "movie"),
    # Its reverse type, created by ToUndirected above.
    rev_edge_types=("movie", "rev_rates", "user"),
)
train_data, val_data, test_data = transform(data)

# In the first hop, we sample at most 20 neighbors.
# In the second hop, we sample at most 10 neighbors.
# In addition, during training, we want to sample negative edges on-the-fly with
# a ratio of 2:1.
# We can make use of the `loader.LinkNeighborLoader` from PyG:
#from torch_geometric.loader import LinkNeighborLoader

# Define seed edges:
edge_label_index = train_data["user", "rates", "movie"].edge_label_index
edge_label = train_data["user", "rates", "movie"].edge_label
train_loader = torch_geometric.loader.LinkNeighborLoader(
    data=train_data,
    # At most 20 first-hop and 10 second-hop neighbors per node.
    num_neighbors=[20, 10],
    # Draw 2 random negative edges per positive supervision edge.
    neg_sampling_ratio=2.0,
    edge_label_index=(("user", "rates", "movie"), edge_label_index),
    edge_label=edge_label,
    batch_size=128,
    shuffle=True,
)



class GNN(torch.nn.Module):
    """Two-layer GraphSAGE encoder; input and output width are both hidden_channels."""

    def __init__(self, hidden_channels):
        super().__init__()
        # Two rounds of neighborhood aggregation, same width throughout.
        self.conv1 = SAGEConv(hidden_channels, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, hidden_channels)

    def forward(self, x: Tensor, edge_index: Tensor) -> Tensor:
        """Return node embeddings after two message-passing hops."""
        h = self.conv1(x, edge_index)
        h = F.relu(h)
        return self.conv2(h, edge_index)


# Our final classifier applies the dot-product between source and destination
#-> Tensor 表示该函数的返回值类型为 Tensor，也就是说，forward 方法返回一个张量
class Classifier(torch.nn.Module):
    """Scores candidate (user, movie) edges with a dot product of node embeddings."""

    def forward(self, x_user: Tensor, x_movie: Tensor, edge_label_index: Tensor) -> Tensor:
        # Gather the endpoint embedding of each supervision edge:
        # row 0 of edge_label_index indexes users, row 1 indexes movies.
        users = x_user[edge_label_index[0]]
        movies = x_movie[edge_label_index[1]]
        # One inner product per edge -> one raw logit per edge.
        return torch.sum(users * movies, dim=-1)


class Model(torch.nn.Module):
    """Link-prediction model: per-type input embeddings -> hetero GNN -> dot-product scorer."""

    def __init__(self, hidden_channels):
        super().__init__()
        # The dataset ships without rich node features, so learnable embedding
        # tables (one row per node) provide inputs for both node types; movie
        # genre one-hots (20-dim) are additionally projected to hidden size.
        self.user_emb = torch.nn.Embedding(data["user"].num_nodes, hidden_channels)
        self.movie_emb = torch.nn.Embedding(data["movie"].num_nodes, hidden_channels)
        self.movie_lin = torch.nn.Linear(20, hidden_channels)
        # Build a homogeneous GraphSAGE stack and lift it to the heterogeneous
        # graph (separate weights per edge type) in one step.
        self.gnn = to_hetero(GNN(hidden_channels), metadata=data.metadata())
        self.classifier = Classifier()

    def forward(self, data: HeteroData) -> Tensor:
        # Input features per node type: users come purely from the embedding
        # table; movies combine projected genre features with an embedding.
        x_dict = {
            "user": self.user_emb(data["user"].node_id),
            "movie": self.movie_lin(data["movie"].x) + self.movie_emb(data["movie"].node_id),
        }
        # Message passing over all edge types; returns one tensor per node type.
        x_dict = self.gnn(x_dict, data.edge_index_dict)
        # One logit per supervision edge in this batch.
        return self.classifier(
            x_dict["user"],
            x_dict["movie"],
            data["user", "rates", "movie"].edge_label_index,
        )


model = Model(hidden_channels=64)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device: '{device}'")
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Train for 5 epochs with on-the-fly negative sampling from the loader.
for epoch in range(1, 6):
    model.train()  # currently a no-op (no dropout/batchnorm), but correct habit
    total_loss = total_examples = 0
    for sampled_data in tqdm.tqdm(train_loader):
        optimizer.zero_grad()
        # Assign the result of .to() rather than relying on in-place movement.
        sampled_data = sampled_data.to(device)
        pred = model(sampled_data)
        ground_truth = sampled_data["user", "rates", "movie"].edge_label
        # Raw logits vs. {0, 1} labels (negatives come from the loader).
        loss = F.binary_cross_entropy_with_logits(pred, ground_truth)
        loss.backward()
        optimizer.step()
        # Accumulate the loss weighted by the number of supervision edges,
        # so the epoch average is per-edge, not per-batch.
        total_loss += float(loss) * pred.numel()
        total_examples += pred.numel()
    print(f"Epoch: {epoch:03d}, Loss: {total_loss / total_examples:.4f}")

# Define the validation seed edges:
edge_label_index = val_data["user", "rates", "movie"].edge_label_index
edge_label = val_data["user", "rates", "movie"].edge_label
# Bug fix: NeighborLoader does not take edge_label_index/edge_label arguments;
# link-level evaluation needs LinkNeighborLoader, matching the training loader.
# No neg_sampling_ratio here: RandomLinkSplit already added negatives to val.
val_loader = torch_geometric.loader.LinkNeighborLoader(
    data=val_data,
    num_neighbors=[20, 10],
    edge_label_index=(("user", "rates", "movie"), edge_label_index),
    edge_label=edge_label,
    batch_size=3 * 128,
    shuffle=False,
)

from sklearn.metrics import roc_auc_score

model.eval()  # switch to evaluation mode for inference
preds = []
ground_truths = []
for sampled_data in tqdm.tqdm(val_loader):
    with torch.no_grad():
        sampled_data = sampled_data.to(device)
        preds.append(model(sampled_data))
        ground_truths.append(sampled_data["user", "rates", "movie"].edge_label)
# Concatenate all batches and score with AUC (labels are {0, 1}).
pred = torch.cat(preds, dim=0).cpu().numpy()
ground_truth = torch.cat(ground_truths, dim=0).cpu().numpy()
auc = roc_auc_score(ground_truth, pred)
print()
print(f"Validation AUC: {auc:.4f}")