from models.gcn import gcn
from models.gin import gin
from models.gat import gat
from models.gcn_sub import gcns
from models.gat_plus import gatp
from utils import gnn_load_data, gcn_preprocessing, gin_preprocessing, gat_preprocessing
from train import train
import configparser
import argparse
import os
import ast

# CLI: a single optional flag pointing at the INI file that drives training.
parser = argparse.ArgumentParser(description='GNN')
parser.add_argument(
    '--config',
    type=str,
    default="./config/template.ini",
    help='path of config file (default: ./config/template.ini)',
)
args = parser.parse_args()

# Parse the chosen INI file; each section configures one model run.
file_name = args.config
config = configparser.ConfigParser()
config.read(file_name, encoding='utf-8')

# Each config section describes one model: read its hyper-parameters, build
# the requested GNN architecture, and run training on the configured dataset.
all_models = config.sections()
for model in all_models:
    print('========{}========'.format(model))

    # Whether this section should be trained at all (skip-flag).
    istrain = config.getboolean(model, 'train')

    gnntype = config.get(model, 'gnntype')
    print('gnntype:         {}'.format(gnntype))

    hidlayer_size = config.getint(model, 'hidlayer_size')
    print('hidlayer_size:   {}'.format(hidlayer_size))

    hidlayer_num = config.getint(model, 'hidlayer_num')
    print('hidlayer_num:    {}'.format(hidlayer_num))

    if gnntype == 'gin':
        # Extra hyper-parameter only used by GIN's internal MLPs.
        mlp_layer_num = config.getint(model, 'mlp_layer_num')
        print('mlp_layer_num:   {}'.format(mlp_layer_num))

    dataset_path = config.get(model, 'dataset_path')
    print('dataset_path:    {}'.format(dataset_path))

    dataset_name = config.get(model, 'dataset_name')
    print('dataset_name:    {}'.format(dataset_name))

    sumnormalize = config.getboolean(model, 'sumnormalize')
    print('sumnormalize:    {}'.format(sumnormalize))

    total_epoch = config.getint(model, 'total_epoch')
    print('total_epoch:     {}'.format(total_epoch))

    test_every = config.getint(model, 'test_every')
    # One extra space vs. the original so the value aligns with the
    # other printed fields (the original label was one space short).
    print('test_every:      {}'.format(test_every))

    train_size = config.getint(model, 'train_size')
    print('train_size:      {}'.format(train_size))

    val_size = config.getint(model, 'val_size')
    print('val_size:        {}'.format(val_size))

    test_size = config.getint(model, 'test_size')
    print('test_size:       {}'.format(test_size))

    lr = config.getfloat(model, 'lr')
    print('lr:              {}'.format(lr))

    wd = config.getfloat(model, 'wd')
    print('wd:              {}'.format(wd))

    log = config.getboolean(model, 'log')
    print('log:             {}'.format(log))

    print()

    if not istrain:
        print('{} will not be trained'.format(model))
        print()
        continue

    if log:
        log_dir = './logs/{}/'.format(model)
        # exist_ok avoids the exists()/makedirs() race of the original.
        os.makedirs(log_dir, exist_ok=True)
        # Snapshot the config section used for this run, then close it
        # immediately -- only the train/test logs stay open during training.
        with open('./logs/{}/config.txt'.format(model), mode='w', encoding='utf-8') as config_log_obj:
            config_log_obj.write(str(config.items(model)))
        logs = {
            'train_log': open('./logs/{}/train.txt'.format(model), mode='w', encoding='utf-8'),
            'test_log': open('./logs/{}/test.txt'.format(model), mode='w', encoding='utf-8'),
        }
    else:
        logs = None

    try:
        # Dataset shared by every architecture; preprocessing differs per type.
        features, adj, labels = gnn_load_data(path=dataset_path, name=dataset_name)
        # Labels are assumed to be contiguous class indices 0..C-1, so the
        # number of output classes is max label + 1.
        num_classes = labels.max().item() + 1

        if gnntype == 'gcn':
            train_features, train_adj = gcn_preprocessing(features, adj, sumnormalize)
            train_model = gcn(insize=train_features.shape[1],
                              outsize=num_classes,
                              hidsize=hidlayer_size,
                              hidlayernum=hidlayer_num)

        elif gnntype == 'gcns':
            train_features, train_adj = gcn_preprocessing(features, adj, sumnormalize)
            train_model = gcns(insize=train_features.shape[1],
                               outsize=num_classes,
                               hidsize=hidlayer_size,
                               hidlayernum=hidlayer_num)

        elif gnntype == 'gin':
            train_features, train_adj = gin_preprocessing(features, adj, sumnormalize)
            train_model = gin(insize=train_features.shape[1],
                              outsize=num_classes,
                              hidsize=hidlayer_size,
                              hidlayernum=hidlayer_num,
                              mlp_layer_num=mlp_layer_num,
                              mlp_layer_size=hidlayer_size)

        elif gnntype == 'gat':
            train_features, train_adj = gat_preprocessing(features, adj, sumnormalize)
            # BUG FIX: this branch previously constructed gatp (GAT-plus),
            # leaving the imported gat model unused and silently training
            # the wrong architecture for gnntype == 'gat'.
            train_model = gat(insize=train_features.shape[1],
                              outsize=num_classes,
                              hidsize=hidlayer_size,
                              hidlayernum=hidlayer_num)

        elif gnntype == 'gatp':
            train_features, train_adj = gat_preprocessing(features, adj, sumnormalize)
            train_model = gatp(insize=train_features.shape[1],
                               outsize=num_classes,
                               hidsize=hidlayer_size,
                               hidlayernum=hidlayer_num)

        else:
            raise ValueError('gnntype error')

        train(model=train_model,
              features=train_features,
              adj=train_adj,
              labels=labels,
              total_epoch=total_epoch,
              train_size=train_size,
              val_size=val_size,
              test_size=test_size,
              lr=lr,
              weight_decay=wd,
              logs=logs,
              test_every=test_every)
    finally:
        # Close the log handles even when loading/training raises, so a
        # failure in one model section does not leak file descriptors.
        if logs is not None:
            for fileobj in logs.values():
                fileobj.close()