def load_dataset(dataset, dbname, alleletype, jobs, recreate, shard, test):
    """Load one genotype/phenotype dataset into an existing Postgres database.

    Parameters
    ----------
    dataset : str
        Dataset name; must be a sub-section of the dbtype section in the config.
    dbname : str
        Target database name (must already exist -- created by init_db.py).
    alleletype : str or None
        Allele coding; only passed through for 'illumina' data.
    jobs : int
        Number of parallel jobs handed to the platform loader.
    recreate : bool or None
        True -> drop and recreate the schema, False -> keep it,
        None -> init_schema's default behaviour (values come from the CLI flags).
    shard : bool
        Use geno table sharding (selects the Shard_* loader and schema suffix).
    test : bool
        Load a truncated genofile into a '*_test' schema.

    Exits via sys.exit() with a message on configuration/database errors.
    """
    import sys, os
    from utils import get_conf, pg_url, create_engine_wrapper, randstr
    from dbutils import dbuser_exists, db_exists, get_db_type

    conf = get_conf()
    usr = os.popen('whoami').read().strip()
    dbuser = conf["dbuser"]
    password = conf["password"]
    portnum = conf["portnum"]
    # Three connection strings: as the OS user (for the role-existence check),
    # as the configured dbuser against the 'postgres' db, and against dbname.
    dbstring_usr = "%s://%s@:%s/postgres" % (pg_url(), usr, portnum)
    dbstring = "%s://%s:%s@localhost:%s/postgres" % (pg_url(), dbuser, password, portnum)
    dbstring_dbname = "%s://%s:%s@localhost:%s/%s" % (pg_url(), dbuser, password, portnum, dbname)
    db_usr = create_engine_wrapper(dbstring_usr)
    db = create_engine_wrapper(dbstring)
    # NOTE(review): db_dbname is never used below; kept in case
    # create_engine_wrapper has side effects -- confirm and drop if not.
    db_dbname = create_engine_wrapper(dbstring_dbname)
    if not db_exists(db, dbname):
        sys.exit("database '%s' does not exist\nrun the init_db.py script, exiting" % dbname)
    if not dbuser_exists(db_usr, dbuser):
        sys.exit("dbuser '%s' does not exist\nrun the init_db.py script, exiting" % dbuser)
    dbtype = get_db_type(dbstring_dbname)
    if dbtype not in conf.sections:
        sys.exit("dbtype must be one of %s, not %s.\nWas the database %s created with init_db.py?" % (', '.join(conf.sections), dbtype, dbstring_dbname))
    datasets = conf[dbtype].sections
    if dataset not in datasets:
        sys.exit("error, dataset %s is not listed in the config as one of the datasets associated with database type %s" % (dataset, dbtype))
    # Per-run scratch directory; removed in the finally clause below.
    tmpdir = os.path.join(conf['tmpdir'], 'tmp_%s' % randstr())
    # Dataset-level annotfile overrides the dbtype-level one.
    # BUGFIX: previously 'annotfile' was left unassigned when neither level
    # defined it, raising NameError at the plat.* constructor; fall back to None.
    if "annotfile" in conf[dbtype][dataset]:
        annotfile = conf[dbtype][dataset]["annotfile"]
    elif "annotfile" in conf[dbtype]:
        annotfile = conf[dbtype]["annotfile"]
    else:
        annotfile = None
    phenofiles = conf[dbtype][dataset]["phenofiles"]
    if test:
        from truncate_genofile import truncate_genofile
        # Work on a truncated copy of the genofile, written into tmpdir.
        genofile = os.path.join(
            tmpdir, os.path.basename(conf[dbtype][dataset]["genofile"]) + '.trunc')
        truncate_genofile(tmpdir, dataset, dbname)
        # truncate_genofile leaves SQLAlchemy mappers configured; clear them
        # so the plat loader can map the tables again.
        from sqlalchemy.orm import clear_mappers
        clear_mappers()
    else:
        genofile = conf[dbtype][dataset]["genofile"]

    # Schema name encodes the shard/test variants, e.g. 'ds_shard_test'.
    schema = dataset
    if shard:
        schema += "_shard"
    if test:
        schema += "_test"
    from init_schema import init_schema
    # recreate is already True/False/None straight from the CLI flags, so the
    # old three-way if/elif/else dispatch collapses to a pass-through.
    init_schema(schema, dbname, recreate)
    import plat
    if dbtype == 'affy6':
        loader_cls = plat.Shard_Affy6 if shard else plat.Single_Affy6
        p = loader_cls(phenofiles, annotfile, genofile, schema, dbtype, dbstring_dbname, tmpdir, jobs)
    elif dbtype == 'illumina':
        loader_cls = plat.Shard_Illumina if shard else plat.Single_Illumina
        p = loader_cls(phenofiles, annotfile, genofile, schema, alleletype, dbtype, dbstring_dbname, tmpdir, jobs)
    else:
        # BUGFIX: previously fell through with 'p' unbound, raising
        # UnboundLocalError; fail with an explicit message instead.
        sys.exit("no loader implemented for dbtype '%s'" % dbtype)
    try:
        p.load_dataset()
    finally:
        # Always remove the scratch directory, even if loading failed.
        if os.path.exists(tmpdir):
            import shutil
            shutil.rmtree(tmpdir)

if __name__ == '__main__':
    import sys
    import getpass
    from optparse import OptionParser

    # Current OS user, used only to build the suggested db name in the usage
    # text. getpass.getuser() replaces the old os.popen('whoami') call: no
    # subprocess, and it works even where a 'whoami' binary is unavailable.
    usr = getpass.getuser()
    usage = "'%prog [-a alleletype] [-j jobs] [-n] [-s] [-t] [-y] dataset db'. Using the value dbtype_" + usr + " for db is recommended, where dbtype can be either 'affy6' or 'illumina'. Alleletype option is only required if dbtype is 'illumina', with possible values 'forward' and 'top'."
    parser = OptionParser(usage=usage)
    parser.add_option("-a", "--allele-type", action="store", type="string", dest="alleletype", help="allele type")
    parser.add_option("-j", "--jobs", action="store", type="int", dest="jobs", help="number of jobs to run in parallel. Default is 1")
    parser.add_option("-n", "--no-recreate", action="store_false", dest="recreate", help="do not recreate schema")
    parser.add_option("-s", "--sharding", action="store_true", dest="shard", help="use geno table sharding")
    parser.add_option("-t", "--test", action="store_true", dest="test", help="test version")
    # -n and -y share dest="recreate": unset -> None, -n -> False, -y -> True.
    parser.add_option("-y", "--recreate", action="store_true", dest="recreate", help="delete schema and recreate")
    parser.set_defaults(jobs=1)
    (options, args) = parser.parse_args()

    if len(args) != 2:
        # Parenthesized print works identically under Python 2 and Python 3
        # (the original bare print statement was Python-2-only syntax).
        print("incorrect number of arguments")
        parser.print_help()
        sys.exit(1)

    dataset, dbname = args

    load_dataset(dataset, dbname, options.alleletype, options.jobs, options.recreate, options.shard, options.test)
