import argparse
import datetime
import pickle
import random
import dgl
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
from sklearn.metrics import f1_score
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import matplotlib.pyplot as plt
# Initialization helpers
def set_random_seed(seed=0):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
# Default hyper-parameters from the HAN paper; merged into the CLI args dict
# by setup() below.
default_configure = {
    "lr": 0.005,  # Learning rate
    "num_heads": [8],  # Number of attention heads for node-level attention
    "hidden_units": 8,
    "dropout": 0.6,
    "weight_decay": 0.001,
    "num_epochs": 20,
    "patience": 100,
}
# NOTE(review): sampling_configure is defined but never read in this file.
sampling_configure = {"batch_size": 20}
# Per-epoch curves collected during training for the matplotlib plots below.
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
def setup(args):
    """Fill ``args`` with the paper's defaults, pick a device and seed RNGs.

    Mutates and returns the same dict for convenience.
    """
    args["dataset"] = "ACM"
    args["device"] = "cuda:0" if torch.cuda.is_available() else "cpu"
    # The configuration below is from the paper.
    args.update(default_configure)
    set_random_seed(args["seed"])
    return args
def get_binary_mask(total_size, indices):
    """Return a length-``total_size`` uint8 tensor that is 1 exactly at ``indices``."""
    selected = torch.zeros(total_size)
    selected[indices] = 1
    return selected.byte()
def load_acm(data_path="C:\\Users\\lyolz\\.dgl\\ACM3025.pkl"):
    """Load the preprocessed ACM3025 dataset from a pickle file.

    Parameters
    ----------
    data_path : str
        Path to ``ACM3025.pkl``.  Previously hard-coded; now a parameter with
        the old absolute path as a backward-compatible default.

    Returns
    -------
    tuple
        (gs, features, labels, num_classes, train_idx, val_idx, test_idx,
        train_mask, val_mask, test_mask); tensors are moved to ``args["device"]``.
    """
    # SECURITY NOTE: pickle.load executes arbitrary code embedded in the file;
    # only point this at trusted local data.
    with open(data_path, "rb") as f:
        data = pickle.load(f)
    labels = torch.from_numpy(data["label"].todense()).long()
    features = torch.from_numpy(data["feature"].todense()).float()
    num_classes = labels.shape[1]  # labels are one-hot, so width == #classes
    labels = labels.nonzero()[:, 1]  # one-hot -> integer class ids
    # One homogeneous DGL graph per meta-path (PAP and PLP).
    author_g = dgl.from_scipy(data["PAP"])
    subject_g = dgl.from_scipy(data["PLP"])
    gs = [author_g, subject_g]
    train_idx = torch.from_numpy(data["train_idx"]).long().squeeze(0)
    val_idx = torch.from_numpy(data["val_idx"]).long().squeeze(0)
    test_idx = torch.from_numpy(data["test_idx"]).long().squeeze(0)
    num_nodes = author_g.num_nodes()
    # Boolean node masks for each split.
    train_mask = get_binary_mask(num_nodes, train_idx).bool()
    val_mask = get_binary_mask(num_nodes, val_idx).bool()
    test_mask = get_binary_mask(num_nodes, test_idx).bool()
    # NOTE(review): relies on the module-level ``args`` dict for the device;
    # ``args`` is defined before this function is called in the script below.
    device = args["device"]
    features = features.to(device=device)
    labels = labels.to(device=device)
    train_mask = train_mask.to(device=device)
    val_mask = val_mask.to(device=device)
    test_mask = test_mask.to(device=device)
    return (
        gs,
        features,
        labels,
        num_classes,
        train_idx,
        val_idx,
        test_idx,
        train_mask,
        val_mask,
        test_mask,
    )
class EarlyStopping(object):
    """Stop training when validation loss and accuracy both stall.

    A step counts against ``patience`` only when loss got worse AND accuracy
    got worse; any improvement resets the counter.
    """

    def __init__(self, patience=10):
        now = datetime.datetime.now()
        # Timestamped checkpoint name so parallel runs do not clobber each other.
        self.filename = "early_stop_{}_{:02d}-{:02d}-{:02d}.pth".format(
            now.date(), now.hour, now.minute, now.second
        )
        self.patience = patience
        self.counter = 0
        self.best_acc = None
        self.best_loss = None
        self.early_stop = False

    def step(self, loss, acc, model):
        """Record one validation result; return True once patience is exhausted."""
        if self.best_loss is None:
            # First observation becomes the baseline and is checkpointed.
            self.best_acc = acc
            self.best_loss = loss
            self.save_checkpoint(model)
        elif loss > self.best_loss and acc < self.best_acc:
            # Both metrics got worse: burn one unit of patience.
            self.counter += 1
            print(
                f"EarlyStopping counter: {self.counter} out of {self.patience}"
            )
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # At least one metric held or improved; checkpoint only when both did.
            if loss <= self.best_loss and acc >= self.best_acc:
                self.save_checkpoint(model)
            self.best_loss = np.min((loss, self.best_loss))
            self.best_acc = np.max((acc, self.best_acc))
            self.counter = 0
        return self.early_stop

    def save_checkpoint(self, model):
        """Persist the current weights to the timestamped checkpoint file."""
        torch.save(model.state_dict(), self.filename)

    def load_checkpoint(self, model):
        """Restore the weights saved by :meth:`save_checkpoint`."""
        model.load_state_dict(torch.load(self.filename))
# Topology-imbalance handling methods
def pagerank(graph):
    n = graph.number_of_nodes()
    adj = graph.adjacency_matrix()
    adj=adj.to_dense()
    adj = adj.cpu().numpy()
    adj += np.eye(n)  # 添加自环
    out_degrees = adj.sum(axis=1)
    out_degrees_inv = 1.0 / out_degrees
    P = adj * out_degrees_inv[:, np.newaxis]
    alpha = 1
    teleport = np.ones((n, n)) / n
    M = alpha * P + (1 - alpha) * teleport
    r = np.ones((n, 1)) / n
    for _ in range(100):
        r = np.dot(M, r)
    return r.flatten()  # 展平为一维数组
# Node degree centrality
def degree_centrality_weights(graph):
    """Per-node weights: each node's in-degree normalized over all nodes."""
    in_deg = graph.in_degrees().float()
    return in_deg / in_deg.sum()
def compute_path_importance(graphs):
    """Importance of each meta-path graph: its mean in-degree, normalized to sum to 1."""
    avg_degrees = np.array(
        [torch.mean(g.in_degrees().float()).item() for g in graphs]
    )
    return avg_degrees / np.sum(avg_degrees)
def topology_imbalance_loss(outputs, labels, graphs):
    """Cross-entropy scaled by the normalized meta-path importance weights.

    NOTE: ``CrossEntropyLoss()`` returns a scalar, so multiplying by the
    importance vector and averaging is equivalent to mean(importance) * CE.
    """
    importance = compute_path_importance(graphs)
    importance = torch.from_numpy(importance).to(device=args["device"])
    ce = nn.CrossEntropyLoss()(outputs, labels)
    return torch.mean(importance * ce)
# Performance evaluation
def score(logits, labels):
    """Accuracy, micro-F1 and macro-F1 of the argmax predictions."""
    predicted = logits.argmax(dim=1).long().cpu().numpy()
    truth = labels.cpu().numpy()
    accuracy = (predicted == truth).sum() / len(predicted)
    return (
        accuracy,
        f1_score(truth, predicted, average="micro"),
        f1_score(truth, predicted, average="macro"),
    )
def evaluate(model, g, features, labels, mask, loss_func):
    """Forward the model without gradients and report metrics on ``mask`` nodes."""
    model.eval()
    with torch.no_grad():
        logits = model(g, features)
    masked_logits, masked_labels = logits[mask], labels[mask]
    loss = loss_func(masked_logits, masked_labels)
    accuracy, micro_f1, macro_f1 = score(masked_logits, masked_labels)
    return loss, accuracy, micro_f1, macro_f1
# Model definition
class SemanticAttention(nn.Module):
    """Attention over the M meta-path embeddings of each node.

    Scores every meta-path with a small MLP, softmaxes the scores into a
    convex combination, and collapses the meta-path axis.
    """

    def __init__(self, in_size, hidden_size=128):
        super(SemanticAttention, self).__init__()
        self.project = nn.Sequential(
            nn.Linear(in_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1, bias=False),
        )

    def forward(self, z):
        # z: (N, M, D*K) -- one embedding per node per meta-path
        scores = self.project(z).mean(0)  # (M, 1): score each meta-path
        beta = torch.softmax(scores, dim=0)  # (M, 1): convex weights
        beta = beta.expand((z.shape[0],) + beta.shape)  # (N, M, 1)
        return (beta * z).sum(1)  # (N, D*K)
class HANLayer(nn.Module):
    """One HAN layer: a GAT per meta-path graph, fused by semantic attention.

    ``forward`` expects ``gs`` (one DGL graph per meta-path, same node set)
    and node features ``h``; it returns one (N, out_size * layer_num_heads)
    embedding per node.
    """

    def __init__(
            self, num_meta_paths, in_size, out_size, layer_num_heads, dropout
    ):
        super(HANLayer, self).__init__()
        # One node-level GAT per meta-path; heads are concatenated (flatten).
        self.gat_layers = nn.ModuleList(
            [
                GATConv(
                    in_size,
                    out_size,
                    layer_num_heads,
                    dropout,
                    dropout,
                    activation=F.elu,
                )
                for _ in range(num_meta_paths)
            ]
        )
        self.semantic_attention = SemanticAttention(
            in_size=out_size * layer_num_heads
        )
        self.num_meta_paths = num_meta_paths

    def forward(self, gs, h):
        per_path = [
            gat(graph, h).flatten(1) for gat, graph in zip(self.gat_layers, gs)
        ]
        stacked = torch.stack(per_path, dim=1)  # (N, M, D * K)
        return self.semantic_attention(stacked)  # (N, D * K)
class HAN(nn.Module):
    """Heterogeneous graph attention network: stacked HANLayers + linear head.

    ``num_heads`` is a list with one entry per HANLayer; layer l consumes the
    concatenated heads of layer l-1.
    """

    def __init__(
            self, num_meta_paths, in_size, hidden_size, out_size, num_heads, dropout
    ):
        super(HAN, self).__init__()
        self.layers = nn.ModuleList()
        layer_in = in_size
        for heads in num_heads:
            self.layers.append(
                HANLayer(num_meta_paths, layer_in, hidden_size, heads, dropout)
            )
            # Next layer sees the concatenation of this layer's heads.
            layer_in = hidden_size * heads
        self.predict = nn.Linear(hidden_size * num_heads[-1], out_size)

    def forward(self, g, h):
        for layer in self.layers:
            h = layer(g, h)
        return self.predict(h)
# ---------------------------------------------------------------------------
# main: argument parsing
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser("HAN")
parser.add_argument("-s", "--seed", type=int, default=1, help="Random seed")
parser.add_argument(
    "-ld",
    "--log-dir",
    type=str,
    default="results",
    help="Dir for saving training results",
)
# NOTE: parse_args(args=[]) deliberately ignores the real command line
# (notebook-style run), so only the defaults above take effect.
args = parser.parse_args(args=[]).__dict__
args = setup(args)
(
    g,  # list of meta-path graphs
    features,  # node feature matrix (1870-dim per the original comment -- TODO confirm)
    labels,
    num_classes,
    train_idx,
    val_idx,
    test_idx,
    train_mask,
    val_mask,
    test_mask,
) = load_acm()
model = HAN(
    num_meta_paths=len(g),  # number of meta-path graphs
    in_size=features.shape[1],  # input feature dimension
    hidden_size=args["hidden_units"],
    out_size=num_classes,  # number of target classes
    num_heads=args["num_heads"],  # attention heads per HAN layer
    dropout=args["dropout"],
).to(device=args["device"])
g = [graph.to(device=args["device"]) for graph in g]
# NOTE(review): stopper is created but never stepped in the loop below, so
# early stopping is effectively disabled.
stopper = EarlyStopping(patience=args["patience"])
loss_fcn = torch.nn.CrossEntropyLoss()  # plain cross-entropy (used for eval curves)
optimizer = torch.optim.Adam(
    model.parameters(), lr=args["lr"], weight_decay=args["weight_decay"]
)
# Training criterion: cross-entropy scaled by meta-path importance weights.
criterion = topology_imbalance_loss
# ---------------------------------------------------------------------------
# Training loop.
# ---------------------------------------------------------------------------
for epoch in range(args["num_epochs"]):
    model.train()
    optimizer.zero_grad()
    outputs = model(g, features)
    # BUG FIX: the loss was previously computed over ALL nodes
    # (criterion(outputs, labels, g)), which leaks validation/test labels into
    # the optimization.  Restrict it to the training split.
    loss = criterion(outputs[train_mask], labels[train_mask], g)
    loss.backward()
    optimizer.step()
    # Training metrics from the forward pass above.
    train_acc, train_micro_f1, train_macro_f1 = score(
        outputs[train_mask], labels[train_mask]
    )
    # Validation metrics use the plain cross-entropy so curves are comparable.
    val_loss, val_acc, val_micro_f1, val_macro_f1 = evaluate(
        model, g, features, labels, val_mask, loss_fcn
    )
    print(
        "Epoch {:d} | Train Loss {:.4f} | Train Micro f1 {:.4f} | Train Macro f1 {:.4f} | "
        "Val Loss {:.4f} | Val Micro f1 {:.4f} | Val Macro f1 {:.4f}".format(
            epoch + 1,
            loss.item(),
            train_micro_f1,
            train_macro_f1,
            val_loss.item(),
            val_micro_f1,
            val_macro_f1,
        )
    )
    # Curve bookkeeping: re-evaluate the train split with the plain criterion.
    # (The original code also re-evaluated the validation split here, computing
    # identical numbers twice per epoch; the values from above are reused.)
    train_loss, train_acc, _, _ = evaluate(model, g, features, labels, train_mask, loss_fcn)
    train_losses.append(train_loss.item())
    train_accuracies.append(train_acc.item())
    val_losses.append(val_loss.item())
    val_accuracies.append(val_acc.item())

# ---------------------------------------------------------------------------
# Plot the per-epoch curves (plt.show() blocks until the window is closed).
# ---------------------------------------------------------------------------
# Loss curves.
plt.figure()
plt.plot(range(1, args["num_epochs"] + 1), train_losses, label='Train')
plt.plot(range(1, args["num_epochs"] + 1), val_losses, label='Validation')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and Validation Loss Topology-Imbalance Learning')
plt.legend()
plt.show()

# Accuracy curves.
plt.figure()
plt.plot(range(1, args["num_epochs"] + 1), train_accuracies, label='Train')
plt.plot(range(1, args["num_epochs"] + 1), val_accuracies, label='Validation')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Validation Accuracy Topology-Imbalance Learning')
plt.legend()
plt.show()
# NOTE(review): checkpoint restore is disabled, so the test metrics below come
# from the FINAL epoch's weights, not the early-stopping best.
# stopper.load_checkpoint(model)
test_loss, test_acc, test_micro_f1, test_macro_f1 = evaluate(
    model, g, features, labels, test_mask, loss_fcn
)
print(
    "Test acc {:.4f} | Test Micro f1 {:.4f} | Test Macro f1 {:.4f}".format(
        test_acc.item(), test_micro_f1, test_macro_f1
    )
)
