def toList(fileHandle, sep, filename):
    """Split each line of *fileHandle* on *sep* and write each resulting
    list's Python repr (one per line) to *filename*.

    Parameters
    ----------
    fileHandle : open, readable text-file-like object
    sep : str
        Delimiter to split each stripped line on.
    filename : str
        Path of the output file; overwritten if it exists.

    Returns
    -------
    The (now fully consumed) *fileHandle*, unchanged.
    """
    with open(filename, 'w') as out:
        # Iterate the handle lazily instead of readlines() so the whole
        # input is never held in memory at once; output is unchanged.
        for line in fileHandle:
            fields = line.strip().split(sep)
            out.write(str(fields) + "\n")

    return fileHandle

def mergeDFCOLS(*args):
    """Combine the given sequences column-wise into a pandas DataFrame.

    Each positional argument becomes one column, named by its position
    as a string ('0', '1', ...).  Arguments must satisfy the usual
    DataFrame-constructor length rules (equal lengths, or scalars).

    Returns
    -------
    pandas.DataFrame with ``len(args)`` columns.
    """
    # Import from the public pandas namespace, not the private
    # pandas.core.frame path, which is not a stable API.
    from pandas import DataFrame

    # Dict comprehension replaces the original index loop; the unused
    # `lens` variable has been dropped.
    return DataFrame({str(i): col for i, col in enumerate(args)})



def myArgParser():
    """Parse command-line arguments for TGSRec link-prediction experiments.

    Reads ``sys.argv`` via argparse and returns the parsed namespace.
    All options have defaults, so the parser also succeeds with no
    command-line arguments.

    Returns
    -------
    argparse.Namespace with the attributes defined below.
    """
    # Only argparse is needed here; the original's unused imports
    # (math, logging, time, random, sys) have been removed.
    import argparse

    ### Argument and global variables
    parser = argparse.ArgumentParser('Interface for TGSRec experiments on link predictions')
    parser.add_argument('-d', '--data', type=str, help='data sources to use, try wikipedia or reddit',
                        default='ml-100k')
    parser.add_argument('--bs', type=int, default=200, help='batch_size')
    parser.add_argument('--prefix', type=str, default='', help='prefix to name the checkpoints')
    parser.add_argument('--n_degree', type=int, default=20, help='number of neighbors to sample')
    parser.add_argument('--n_head', type=int, default=2, help='number of heads used in attention layer')
    parser.add_argument('--n_epoch', type=int, default=200, help='number of epochs')
    parser.add_argument('--n_layer', type=int, default=2, help='number of network layers')
    parser.add_argument('--lr', type=float, default=0.005, help='learning rate')
    parser.add_argument('--drop_out', type=float, default=0.1, help='dropout probability')
    parser.add_argument('--reg', type=float, default=0.1, help='regularization')
    parser.add_argument('--gpu', type=int, default=0, help='idx for the gpu to use')
    parser.add_argument('--node_dim', type=int, default=20, help='Dimentions of the node embedding')
    parser.add_argument('--time_dim', type=int, default=20, help='Dimentions of the time embedding')
    parser.add_argument('--agg_method', type=str, choices=['attn', 'lstm', 'mean'], help='local aggregation method',
                        default='attn')
    parser.add_argument('--attn_mode', type=str, choices=['prod', 'map'], default='prod',
                        help='use dot product attention or mapping based')
    parser.add_argument('--time', type=str, choices=['time', 'pos', 'empty', 'disentangle'],
                        help='how to use time information', default='time')
    parser.add_argument('--uniform', action='store_true', help='take uniform sampling from temporal neighbors')
    parser.add_argument('--new_node', action='store_true', help='model new node')
    parser.add_argument('--samplerate', type=float, default=1.0, help='samplerate for each user')
    parser.add_argument('--popnegsample', action='store_true', help='use popularity based negative sampling')
    parser.add_argument('--timepopnegsample', action='store_true', help='use timely popularity based negative sampling')
    parser.add_argument('--negsampleeval', type=int, default=-1,
                        help='number of negative sampling evaluation, -1 for all')
    parser.add_argument('--disencomponents', type=int, help='number of various time encoding')

    args = parser.parse_args()
    return args

def myLogger():
    """Configure and return the root logger.

    DEBUG and above go to a timestamped file under ``./log/`` (created if
    missing); WARNING and above also go to the console.  Both sinks share
    one ``asctime - name - levelname - message`` formatter.

    NOTE: each call adds a fresh pair of handlers to the root logger, so
    this should be called once per process to avoid duplicate output.

    Returns
    -------
    logging.Logger — the root logger.
    """
    import logging
    import os
    import time

    # The original called logging.basicConfig(level=INFO) first, which
    # installs an extra root StreamHandler and caused every record to be
    # printed twice on the console; it is intentionally removed.
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # FileHandler raises FileNotFoundError if the directory is missing.
    os.makedirs('log', exist_ok=True)
    fh = logging.FileHandler('log/{}.log'.format(str(time.time())))
    fh.setLevel(logging.DEBUG)

    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)  # WARN is a deprecated alias for WARNING

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
