import torch
import random
import numpy as np
import os
import dgl
from dgl.data.utils import load_graphs
from .lex_gnn_model import LEXGNN
from .lex_gnn_utils import train, test
from sklearn.model_selection import train_test_split


def set_random_seed(seed):
    """Seed every RNG source used by the pipeline for reproducibility.

    Covers Python's `random`, NumPy, PyTorch (CPU and all CUDA devices),
    pins cuDNN to deterministic kernels, and fixes the hash seed.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Independent RNGs — seeding order does not matter.
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
    # Trade cuDNN autotuning for run-to-run determinism.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

def lex_gnn_main(args):
    """Run the full LEX-GNN pipeline: load a preprocessed graph, split and
    mask nodes, build neighbor-sampling loaders, then train and evaluate.

    Parameters
    ----------
    args : dict
        Required keys: 'seed', 'cuda_id', 'data_name', 'train_ratio',
        'test_ratio', 'n_layer', 'batch_size', 'n_hidden', 'n_head',
        'dropout', 'epochs', 'valid_epochs', 'beta', 'lr', 'wd',
        'early_stop'.
        Optional: 'idx_unlabeled' — number of leading nodes treated as
        unlabeled (defaults to 3305, the previous hard-coded value).

    Side effects: sets global RNG seeds, selects the CUDA device, and
    prints the final test metrics.
    """
    # Reproducibility
    set_random_seed(args['seed'])

    # GPU
    device = torch.device(args['cuda_id'])
    torch.cuda.set_device(device)

    # Locate the preprocessed graph relative to this file's package root.
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "data")
    graph_file = "graph-yelp.bin" if args['data_name'] == 'yelp' else "graph-amazon.bin"
    graph_path = os.path.join(data_dir, graph_file)

    # Load graph data
    graphs, _ = load_graphs(graph_path)
    g = graphs[0].to(device)

    # Data split: the first `idx_unlabeled` nodes are excluded from the
    # labeled pool (generalized from a hard-coded 3305).
    labels = g.ndata['label'].cpu().numpy()
    index = list(range(len(labels)))
    idx_unlabeled = args.get('idx_unlabeled', 3305)
    idx_train, idx_rest, y_train, y_rest = train_test_split(
        index[idx_unlabeled:], labels[idx_unlabeled:],
        stratify=labels[idx_unlabeled:],
        train_size=args['train_ratio'], random_state=args['seed'], shuffle=True
    )
    idx_valid, idx_test, y_valid, y_test = train_test_split(
        idx_rest, y_rest, stratify=y_rest,
        test_size=args['test_ratio'], random_state=args['seed'], shuffle=True
    )

    # Mask unlabeled/validation/test nodes with the sentinel class 2 so
    # only training labels are visible to the model.
    y_mask = labels.copy()
    y_mask[index[:idx_unlabeled] + idx_test + idx_valid] = 2
    # Hoisted: this tensor is reused for every edge type below (it was
    # previously rebuilt inside the loop on each iteration).
    y_mask_tensor = torch.LongTensor(y_mask).to(device)
    g.ndata["y_mask"] = y_mask_tensor
    g.ndata["x"] = g.ndata["feat"].contiguous().to(device)

    # Neighbor-sampling fan-out: 50 neighbors per edge type, per layer.
    n_sample = {e: 50 for e in g.etypes}
    n_samples = [n_sample] * args['n_layer']

    # Per-edge sampling probability: edges whose source node is masked
    # get 0.5, edges from labeled sources get 0.9.
    for etype in g.canonical_etypes:
        # `src` is already on `device` (the graph was moved above), and
        # indexing does not require clone/detach.
        src, _ = g.edges(etype=etype)
        g.edges[etype].data['prob'] = torch.where(
            y_mask_tensor[src] == 2, 0.5, 0.9
        ).to(device)

    sampler = dgl.dataloading.NeighborSampler(n_samples, prob='prob')
    train_loader = dgl.dataloading.DataLoader(
        g, idx_train, sampler, batch_size=args['batch_size'], shuffle=True, drop_last=False
    )
    valid_loader = dgl.dataloading.DataLoader(
        g, idx_valid, sampler, batch_size=args['batch_size'], shuffle=False, drop_last=False
    )
    test_loader = dgl.dataloading.DataLoader(
        g, idx_test, sampler, batch_size=args['batch_size'], shuffle=False, drop_last=False
    )

    # Define model (binary classification head, n_class=2)
    model = LEXGNN(
        in_dim=g.ndata["feat"].shape[1],
        n_class=2,
        hidden_dim=args['n_hidden'],
        n_layer=args['n_layer'],
        num_heads=args['n_head'],
        dropout=args['dropout']
    ).to(device)

    # Train 
    model_best, ep, et = train(
        model, train_loader, valid_loader, args['epochs'], args['valid_epochs'], 
        args['beta'], args['lr'], args['wd'], args['early_stop'], args['seed'], device
    )

    # Test 
    auc, f1, gm, ap, auc1 = test(model_best, test_loader, device)
    print('===================================')
    print(f'AUC_cls: {auc:.4f} | AUC_pre: {auc1:.4f} | F1-macro: {f1:.4f} | G-mean: {gm:.4f} | AP: {ap:.4f} | Epoch-time: {et:.4f}') 
    print()  