from utils import print_timing

def get_dataset(schema):
   """Strip a known schema-name suffix to recover the base dataset name.

   Suffixes are tested most-specific first so that '_shard_test' is not
   mistaken for a bare '_shard' schema.
   """
   for suffix in ('_shard_test', '_shard', '_test'):
      if suffix in schema:
         return schema.split(suffix)[0]
   return schema

def get_allelea_alleleb(alleletype):
   """Map an allele-type option to the matching anno-table column names.

   Args:
      alleletype: None, "forward", or "top".
   Returns:
      (allelea, alleleb) tuple of column names.
   Exits:
      Any other value terminates via sys.exit, matching the error style
      used by the rest of this module.
   """
   # Fix: sys was never imported here, so the unknown-alleletype branch
   # raised NameError instead of exiting cleanly.
   import sys
   if alleletype is None:
      return "allelea_id", "alleleb_id"
   if alleletype == "forward":
      return "allelea_forward_id", "alleleb_forward_id"
   if alleletype == "top":
      return "allelea_top_id", "alleleb_top_id"
   sys.exit("create_anno_table: alleletype %s unknown, cannot proceed"%alleletype)

def return_db_layout(dbstring, schemas):
   """Classify the given schemas as all 'geno_single' or all 'geno_shard'.

   Connects to the database, tests each schema against both known table
   layouts, and returns the common layout name.  Exits with an error if
   any schema matches neither layout, or if the schemas are mixed.
   """
   import sys
   from utils import create_engine_wrapper
   db = create_engine_wrapper(dbstring)
   conn = db.connect()
   from dbutils import tables_match_geno_single, tables_match_geno_shard
   match_geno_single = []
   match_geno_shard = []
   for schema in schemas:
      match_geno_single.append(tables_match_geno_single(conn, schema))
      match_geno_shard.append(tables_match_geno_shard(conn, schema))
   schemalen = len(schemas)
   schemas_unmatched = [schema for schema, i, j in zip(schemas, match_geno_single, match_geno_shard) if i==False and j==False]
   if schemas_unmatched:
      # Bug fix: report only the schemas that failed to match; the original
      # joined the full schema list, which made the message misleading.
      schemalist = ', '.join(schemas_unmatched)
      sys.exit("Error: schemas %s do not match either geno single or geno shard dataset layouts"%schemalist)
   if match_geno_single == [True]*schemalen and match_geno_shard == [False]*schemalen:
      return "geno_single"
   elif match_geno_single == [False]*schemalen and match_geno_shard == [True]*schemalen:
      return "geno_shard"
   else:
      schemalist = ', '.join(schemas)
      sys.exit("Error: schemas %s are not all geno single or all geno shard formats"%schemalist)

def create_schemas_and_tables_filter(Session, dbtype, rstr, schemas):
   """For every schema with a configured idlink or pheno filter, create a
   '<schema>_filter_<rstr>' schema holding the filtered idlink/pheno tables;
   schemas with no filters pass through unchanged.

   Returns the list of schema names (filtered or original) to use downstream.
   """
   from utils import get_conf, randstr
   conf = get_conf()
   idlink_filter = get_idlink_filter(dbtype, schemas)
   pheno_filter = get_pheno_filter(dbtype, schemas)
   schemas_filter = []
   for k, schema in enumerate(schemas):
      dataset_conf = conf[dbtype][get_dataset(schema)]
      if 'idlink_filter' not in dataset_conf and 'pheno_filter' not in dataset_conf:
         # Nothing to filter: downstream code reads the original schema.
         schemas_filter.append(schema)
         continue
      filtered = schema + '_filter' + '_' + rstr
      Session.execute("DROP SCHEMA IF EXISTS %(schema)s CASCADE; CREATE SCHEMA %(schema)s;"%{'schema':filtered})
      schemas_filter.append(filtered)
      create_filtered_idlink_tables(Session, dbtype, schema, schemas_filter[k], idlink_filter[k])
      create_filtered_pheno_tables(Session, dbtype, schema, schemas_filter[k], pheno_filter[k])
   Session.commit()
   return schemas_filter

def get_anno_filters(dbtype, schemas):
   """Return each schema's configured 'anno_filter' SQL condition,
   defaulting to "TRUE" when the dataset defines none."""
   from utils import get_conf
   conf = get_conf()
   filters = []
   for schema in schemas:
      dataset_conf = conf[dbtype][get_dataset(schema)]
      filters.append(dataset_conf['anno_filter'] if 'anno_filter' in dataset_conf else "TRUE")
   return filters

def get_idlink_filter(dbtype, schemas):
   """Return each schema's configured 'idlink_filter' SQL condition,
   defaulting to "TRUE" when the dataset defines none."""
   from utils import get_conf
   conf = get_conf()
   filters = []
   for schema in schemas:
      dataset_conf = conf[dbtype][get_dataset(schema)]
      filters.append(dataset_conf['idlink_filter'] if 'idlink_filter' in dataset_conf else "TRUE")
   return filters

def get_pheno_filter(dbtype, schemas):
   """Return each schema's configured 'pheno_filter' SQL condition,
   defaulting to "TRUE" when the dataset defines none."""
   from utils import get_conf
   conf = get_conf()
   filters = []
   for schema in schemas:
      dataset_conf = conf[dbtype][get_dataset(schema)]
      filters.append(dataset_conf['pheno_filter'] if 'pheno_filter' in dataset_conf else "TRUE")
   return filters

def create_filtered_idlink_tables(session, dbtype, schema, schema_filter, idlink_filter):
   """(Re)create <schema_filter>.idlink as the rows of <schema>.idlink that
   match the SQL condition idlink_filter.

   NOTE(review): idlink_filter and the schema names are interpolated directly
   into the SQL; they must come from trusted configuration, never user input.
   (dbtype is part of the shared signature but is unused here.)
   """
   # Fix: dropped the unused get_conf()/conf -- this function never read it.
   from utils import text_autocommit
   create_idlink_table = text_autocommit("DROP TABLE IF EXISTS %(schema_filter)s.idlink; CREATE TABLE %(schema_filter)s.idlink AS (SELECT * FROM %(schema)s.idlink where %(idlink_filter)s)"%{"schema":schema, "schema_filter":schema_filter, "idlink_filter":idlink_filter})
   session.execute(create_idlink_table)
   session.commit()

def create_filtered_pheno_tables(session, dbtype, schema, schema_filter, pheno_filter):
   """(Re)create <schema_filter>.pheno as the rows of <schema>.pheno that
   match the SQL condition pheno_filter; returns pheno_filter (kept for
   backward compatibility with existing callers).

   NOTE(review): pheno_filter and the schema names are interpolated directly
   into the SQL; they must come from trusted configuration, never user input.
   """
   # Fix: dropped the unused get_conf()/conf -- this function never read it.
   from utils import text_autocommit
   create_pheno_table = text_autocommit("DROP TABLE IF EXISTS %(schema_filter)s.pheno; CREATE TABLE %(schema_filter)s.pheno AS (SELECT * FROM %(schema)s.pheno where %(pheno_filter)s)"%{"schema":schema, "schema_filter":schema_filter, "pheno_filter":pheno_filter})
   session.execute(create_pheno_table)
   session.commit()
   return pheno_filter

def create_merged_anno_table_single(session, dbtype, alleletype, sm, schemas):
   from utils import text_autocommit
   import sys
   anno_filters = get_anno_filters(dbtype, schemas)
   allelea, alleleb = get_allelea_alleleb(alleletype)
   ctes = ["dp_anno_%(s)s AS \
     ( SELECT id, %(allelea)s, %(alleleb)s, rsid, chromosome, location \
     FROM \
             (SELECT  anno.*, \
                      row_number() OVER(PARTITION BY anno.rsid ORDER BY anno.id) \
             FROM     %(s)s.anno \
                      INNER JOIN %(s)s.geno \
                      ON       anno.id = geno.anno_id \
             WHERE    idlink_id        = \
                      (SELECT MIN(idlink.id) \
                      FROM    %(s)s.idlink \
                      ) AND \
                      %(anno_filter)s \
             ) AS q \
     WHERE   row_number = '1' \
     ) \
   """%{"s":schema, "allelea":allelea, "alleleb":alleleb, "anno_filter":anno_filters[k]} for k, schema in enumerate(schemas)]
   sel = " INTERSECT ".join(["SELECT * FROM dp_anno_%(s)s"%{'s':schema} for schema in schemas])
   create_anno_table = text_autocommit("DROP TABLE IF EXISTS %(sm)s.anno; CREATE TABLE %(sm)s.anno AS (WITH "%{'sm':sm} + ','.join(ctes) + sel + ")")
   session.execute(create_anno_table)
   session.commit()
   if not session.execute("SELECT * FROM %(sm)s.anno;"%{'sm':sm}).fetchall():
      sys.exit("The set of annotations selected is empty. Something is wrong. Check that all tables are non-empty and also check filter settings. Exiting")
   session.commit()

def create_merged_anno_table_shard(session, dbtype, alleletype, sm, schemas):
   """Create <sm>.anno for shard-layout schemas.

   For each schema a CTE keeps one annotation row per rsid (deduplicated via
   row_number() over rsid, ordered by anno.id), restricted to probes with a
   genotype in the schema's lowest-numbered geno shard for the minimum idlink
   id, and passing the dataset's configured anno_filter.  Per-schema results
   are INTERSECTed into the merged table.  Exits if the result is empty.
   """
   from utils import text_autocommit
   import sys
   anno_filter = get_anno_filters(dbtype, schemas)
   allelea, alleleb = get_allelea_alleleb(alleletype)
   # Find each schema's lowest geno shard id: shard tables are named geno<id>.
   idlist = []
   for schema in schemas:
      ids = session.execute("select * from %(schema)s.geno_idlink"%{'schema':schema}).fetchall()
      ids = [tup[0] for tup in ids]
      ids.sort()
      idlist.append(ids[0])
   ctes = ["dp_anno_%(s)s AS \
      ( SELECT id, %(allelea)s, %(alleleb)s, rsid, chromosome, location \
      FROM \
              (SELECT  anno.*, \
                       row_number() OVER(PARTITION BY anno.rsid ORDER BY anno.id) \
              FROM     %(s)s.anno \
                       INNER JOIN %(s)s.geno%(id)s \
                       ON       anno.id = geno%(id)s.anno_id \
              WHERE    idlink_id        = \
                       (SELECT MIN(idlink.id) \
                       FROM    %(s)s.idlink \
                       ) AND \
                       %(anno_filter)s \
              ) AS q \
      WHERE   row_number = '1' \
      ) \
    """%{"s":schema, "allelea":allelea, "alleleb":alleleb, "anno_filter":anno_filter[k], "id":idlist[k]} for k, schema in enumerate(schemas)]
   sel = " INTERSECT ".join(["SELECT * FROM dp_anno_%(s)s"%{'s':schema} for schema in schemas])
   create_anno_table = text_autocommit("DROP TABLE IF EXISTS %(sm)s.anno; CREATE TABLE %(sm)s.anno AS (WITH "%{'sm':sm} + ','.join(ctes) + sel + ")")
   session.execute(create_anno_table)
   session.commit()
   # Only emptiness matters here; rowcount of the SELECT is sufficient.
   count = session.execute("SELECT * FROM %(sm)s.anno;"%{'sm':sm}).rowcount
   if not count:
      # Fix: removed a redundant second `import sys` that lived in this branch.
      sys.exit("The set of annotations selected is empty. Something is wrong. Check that all tables are non-empty and also check filter settings. Exiting")

class write_map_file(object):
   """Callable that writes a PLINK-style map file (chr, rsid, pos) from the
   merged annotation table built by the supplied strategy function."""
   def __init__(self, create_anno_table_fn):
      # Strategy: the geno-single or geno-shard merged-anno builder.
      self.create_anno_table_fn = create_anno_table_fn
   @print_timing
   def __call__(self, session, dbstring, mapfilename, alleletype, schemas, rstr, col=True):
      """Write mapfilename with one row per merged annotation.

      col=True prepends a 'chr rsid pos' header line.  Exits if the output
      file already exists (non-empty) or alleletype is inconsistent with
      the database type.
      """
      import os, sys
      from utils import text_autocommit
      from dbutils import get_db_type
      dbtype = get_db_type(dbstring)
      if dbtype == 'affy6' and alleletype is not None:
         sys.exit("database is marked as 'affy6', so alleletype option not required - you have used %s as alleletype."%alleletype)
      if dbtype == 'illumina':
         if alleletype != 'forward' and alleletype != 'top':
            sys.exit("database is marked as 'illumina', so alleletype must be either 'forward' or 'top', not %s"%alleletype)
      if os.path.exists(mapfilename) and os.path.getsize(mapfilename) > 0:
         sys.exit("%s already exists, exiting"%mapfilename)
      print("writing map file %s..."%(mapfilename))
      sm = '_'.join(schemas) + '_merge' + '_' + rstr
      session.execute("DROP SCHEMA IF EXISTS %(sm)s CASCADE; CREATE SCHEMA %(sm)s;"%{'sm':sm})
      session.commit()
      self.create_anno_table_fn(session, dbtype, alleletype, sm, schemas)
      s = text_autocommit(
      """
      SET search_path TO %(schemaname)s, public;
      SELECT chromo.name, rsid, location
      FROM %(sm)s.anno INNER JOIN chromo ON %(sm)s.anno.chromosome = chromo.id
      GROUP BY chromo.name, rsid, location order BY min(%(sm)s.anno.id)
      """
      %({'schemaname':schemas[0], 'sm':sm}))
      result = session.execute(s).fetchall()
      # Fix: open the output only after the query succeeds and close it even
      # on error -- the original opened it up-front and leaked the handle on
      # any exception during the DB work.
      with open(mapfilename, 'w') as mf:
         if col == True:
            mf.write("\t".join(["chr","rsid","pos"])+'\n')
         for row in result:
            mf.write("\t".join([str(x) for x in row])+'\n')

# Pre-built map-file writers: one per supported database layout
# (see return_db_layout for how a layout is detected).
write_map_file_single = write_map_file(create_merged_anno_table_single)
write_map_file_shard = write_map_file(create_merged_anno_table_shard)

###############################################################

def pedstr(Session, schema, sm, allelea, alleleb, tmpdir, pedfilename, i):
   """Write one ped-file fragment for geno shard <i> of <schema>.

   The query builds a single tab-joined string of decoded genotypes, ordered
   by anno_id and restricted to annotations present in <sm>.anno; the string
   is written to a per-shard temp file that write_ped_file_shard later
   stitches into the final ped file.  Runs as a ThreadPool task.
   """
   import os.path
   from utils import text_autocommit
   p = text_autocommit("""
   SET search_path TO %(schema)s, public;
   SELECT array_to_string(array_agg(gvals.g), E'\t') AS str
   FROM
   (
   SELECT decode_genotype(geno%(id)s.snpval_id, %(allelea)s, %(alleleb)s) AS g, geno%(id)s.anno_id
   FROM    geno%(id)s
   INNER JOIN %(sm)s.anno
   ON      geno%(id)s.anno_id = %(sm)s.anno.id
   ORDER BY anno_id
   ) AS gvals
   """%({"schema":schema, "sm":sm, "allelea":allelea, "alleleb":alleleb, "id":i}))
   res = Session.execute(p).fetchall()[0][0]
   tmpfilename = os.path.join(tmpdir, os.path.basename(pedfilename)+".%(schema)s.part%(i)s"%{'schema':schema, 'i':i})
   # Fix: the original opened this file and never closed it, leaking one
   # handle per shard task; `with` guarantees the close.
   with open(tmpfilename, 'w') as pf:
      if res is not None:
         pf.write(res+'\n')
   Session.commit()

@print_timing
def write_ped_file_shard(session, dbstring, pedfilename, alleletype, jobs, schemas, rstr):
   """Write a PLINK ped file from shard-layout schemas.

   Per-shard genotype fragments are produced in parallel (pedstr tasks on a
   ThreadPool, one temp file per individual shard id), then each individual's
   pheno columns and genotype fragment are concatenated into pedfilename.
   Exits if the output already exists (non-empty) or alleletype conflicts
   with the database type.
   """
   import os, shutil, sys
   from utils import text_autocommit, safe_mkdir, get_conf
   from threadpool import ThreadPool
   from dbutils import get_db_type
   from dbfunctions import create_or_replace_plpgsql, create_decode_genotype_function
   dbtype = get_db_type(dbstring)
   if dbtype == 'affy6' and alleletype is not None:
      sys.exit("database is marked as 'affy6', so alleletype option not required - you have used %s as alleletype."%alleletype)
   if dbtype == 'illumina':
      if alleletype != 'forward' and alleletype != 'top':
         sys.exit("database is marked as 'illumina', so alleletype must be either 'forward' or 'top', not %s"%alleletype)
   if os.path.exists(pedfilename) and os.path.getsize(pedfilename) > 0:
      sys.exit("%s already exists, exiting"%pedfilename)
   conf = get_conf()
   tmpdir = os.path.join(conf['tmpdir'], 'tmp_%s'%rstr)
   safe_mkdir(tmpdir)
   allelea, alleleb = get_allelea_alleleb(alleletype)
   print("writing ped file %s..."%(pedfilename))
   sm = '_'.join(schemas) + '_merge' + '_' + rstr
   # decode_genotype() is used by the pedstr tasks queued below.
   session.execute(create_or_replace_plpgsql)
   session.execute(create_decode_genotype_function)
   session.commit()
   pool = ThreadPool(session, jobs)
   session.execute("DROP SCHEMA IF EXISTS %(sm)s CASCADE; CREATE SCHEMA %(sm)s;"%{'sm':sm})
   session.commit()
   create_merged_anno_table_shard(session, dbtype, alleletype, sm, schemas)
   schemas_filter = create_schemas_and_tables_filter(session, dbtype, rstr, schemas)
   idlist = []
   for k, schema in enumerate(schemas):
      # Shard ids of individuals that survive the idlink/pheno filters.
      q = text_autocommit("select geno_idlink.id FROM %(schema_filter)s.pheno INNER JOIN %(schema_filter)s.idlink ON pheno.patientid = idlink.patientid INNER JOIN %(schema)s.geno_idlink  ON idlink.id = geno_idlink.id"%{'schema':schema, 'schema_filter':schemas_filter[k]})
      ids = session.execute(q).fetchall()
      session.commit()
      ids = [tup[0] for tup in ids]
      ids.sort()
      idlist.append(ids)
      for i in ids:
         pool.add_task(pedstr, session, schema, sm, allelea, alleleb, tmpdir, pedfilename, i)
   pool.start()
   # wait on the queue until everything has been processed
   pool.wait_completion()
   phenostringsdict = []
   for k, schema in enumerate(schemas):
      # One row per individual: the whole leading ped column block is built
      # server-side as a single tab-separated string, ordered by idlink.id.
      pq = text_autocommit("""
      SET search_path TO %(schema)s, public;
      SELECT
                  pheno.famid
                  || E'\t'
                  || idlink.expid
                  || E'\t'
                  || 0
                  || E'\t'
                  || 0
                  || E'\t'
                  || COALESCE(sex.code::text, '')
                  || E'\t'
                  || COALESCE(pheno.phenotype::text,'')
                  || E'\t'
      FROM %(schema_filter)s.idlink
      INNER JOIN %(schema_filter)s.pheno
      ON      idlink.patientid = pheno.patientid
      INNER JOIN sex
      ON      pheno.sex_id=sex.val
      ORDER BY idlink.id;
      """%{"schema":schema, "schema_filter":schemas_filter[k]})
      phenostrings = session.execute(pq).fetchall()
      session.commit()
      phenostringsdict.append(phenostrings)
      ids = idlist[k]
      assert len(phenostrings) == len(ids), "phenostrings and ids should have the same length, but length of phenostrings is %s and length of ids is %s"%(len(phenostrings), len(ids))
   # Stitch pheno columns + genotype fragment per individual, schema by schema.
   with open(pedfilename, 'w') as f:
      for k, schema in enumerate(schemas):
         ids = idlist[k]
         for j, phenostr in zip(ids, phenostringsdict[k]):
            tmpfilename = os.path.join(tmpdir, os.path.basename(pedfilename)+".%(schemaname)s.part%(j)s"%{'schemaname':schema, 'j':j})
            # Fix: close each fragment file -- the original leaked the handle.
            with open(tmpfilename) as tpf:
               f.write(phenostr[0])
               f.write(tpf.readline())
            os.remove(tmpfilename)
   if os.path.isdir(tmpdir):
      shutil.rmtree(tmpdir)

def write_trunc_ped_file(Session, schema, anno_table_schema, schema_filter, pedfilename, allelea, alleleb, begin_id, end_id):
   """Write ped lines for individuals with begin_id <= idlink.id < end_id.

   gq builds, per individual, one tab-joined string of decoded genotypes
   (restricted to the merged anno table and the filtered idlink/pheno sets,
   ordered by idlink_id then anno_id); pq builds the leading
   famid/expid/0/0/sex/phenotype columns.  Both order by idlink id, so the
   two result sets are zipped row-by-row into pedfilename.
   """
   import os, sys
   from utils import text_autocommit
   # NOTE: E'\t' below contains a literal tab (Python expands \t before the
   # SQL is sent); PostgreSQL treats it as a tab either way.
   gq = text_autocommit("""
     SET search_path TO %(schema)s, public;
     SELECT array_to_string(array_agg(gvals_ord.g), E'\t') AS str
     FROM
     (
     SELECT decode_genotype(geno.snpval_id, %(allelea)s, %(alleleb)s) AS g,
     geno.idlink_id, geno.anno_id
     FROM    geno
     INNER JOIN %(as)s.anno
     ON      geno.anno_id = anno.id
     INNER JOIN %(schema_filter)s.idlink
     ON      geno.idlink_id = idlink.id
     INNER JOIN %(schema_filter)s.pheno
     ON      idlink.patientid = pheno.patientid
     WHERE idlink.id >= %(begin_id)s
     AND idlink.id < %(end_id)s
     ORDER BY idlink_id, anno_id
             ) AS gvals_ord
     GROUP BY  gvals_ord. idlink_id
     ORDER BY gvals_ord.idlink_id;
   """%({"schema":schema, "as":anno_table_schema,  'schema_filter':schema_filter, "allelea":allelea, "alleleb":alleleb, "begin_id":begin_id, "end_id":end_id}))
   genostr = Session.execute(gq).fetchall()
   pq = text_autocommit("""
   SET search_path TO %(schema)s, public;
   SELECT
                  pheno.famid
                  || E'\t'
                  || idlink.expid
                  || E'\t'
                  || 0
                  || E'\t'
                  || 0
                  || E'\t'
                  || COALESCE(sex.code::text, '')
                  || E'\t'
                  || COALESCE(pheno.phenotype::text,'')
                  || E'\t'
     FROM %(schema_filter)s.idlink
     INNER JOIN %(schema_filter)s.pheno
     ON      idlink.patientid = pheno.patientid
     INNER JOIN sex
     ON      pheno.sex_id=sex.val
     WHERE idlink.id >= %(begin_id)s
     AND idlink.id < %(end_id)s
     ORDER BY idlink.id;
    """%({"schema":schema, 'schema_filter':schema_filter, "begin_id":begin_id, "end_id":end_id}))
   phenostr = Session.execute(pq).fetchall()
   Session.commit()
   # Each geno/pheno row carries exactly one string column; pairing by
   # position is safe because both queries order by idlink id.
   f = open(pedfilename, 'w')
   for grow, prow in zip(genostr, phenostr):
      grow = [str(x) for x in grow]
      prow = [str(x) for x in prow]
      f.write("\t".join(prow))
      f.write("\t".join(grow)+'\n')
   f.close()

@print_timing
def write_ped_file_single(session, dbstring, pedfilename, alleletype, jobs, schemas, rstr):
   """Write a PLINK ped file from single-layout schemas.

   The filtered id range of each schema is split into `jobs` contiguous
   chunks; write_trunc_ped_file tasks render each chunk to a temp file in
   parallel, and the fragments are then concatenated into pedfilename.
   Exits if the output already exists (non-empty) or alleletype conflicts
   with the database type.
   """
   import os, shutil, sys
   from utils import safe_mkdir, text_autocommit, get_conf
   from dbutils import get_db_type
   from dbfunctions import create_or_replace_plpgsql, create_decode_genotype_function
   from threadpool import ThreadPool
   dbtype = get_db_type(dbstring)
   if dbtype == 'affy6' and alleletype is not None:
      sys.exit("database is marked as 'affy6', so alleletype option not required - you have used %s as alleletype."%alleletype)
   if dbtype == 'illumina':
      if alleletype != 'forward' and alleletype != 'top':
         sys.exit("database is marked as 'illumina', so alleletype must be either 'forward' or 'top', not %s"%alleletype)
   if os.path.exists(pedfilename) and os.path.getsize(pedfilename) > 0:
      sys.exit("%s already exists, exiting"%pedfilename)
   allelea, alleleb = get_allelea_alleleb(alleletype)
   print("writing ped file %s..."%(pedfilename))
   conf = get_conf()
   tmpdir = os.path.join(conf['tmpdir'], 'tmp_%s'%rstr)
   safe_mkdir(tmpdir)
   session.execute(create_or_replace_plpgsql)
   session.execute(create_decode_genotype_function)
   sm = '_'.join(schemas) + '_merge' + '_' + rstr
   session.execute("DROP SCHEMA IF EXISTS %(sm)s CASCADE; CREATE SCHEMA %(sm)s;"%{'sm':sm})
   create_merged_anno_table_single(session, dbtype, alleletype, sm, schemas)
   schemas_filter = create_schemas_and_tables_filter(session, dbtype, rstr, schemas)
   session.commit()
   pool = ThreadPool(session, jobs)
   for k, schema in enumerate(schemas):
      q = text_autocommit("select idlink.id FROM %(schema_filter)s.pheno INNER JOIN %(schema_filter)s.idlink ON pheno.patientid = idlink.patientid"%{'schema':schema, 'schema_filter':schemas_filter[k]})
      ids = session.execute(q).fetchall()
      ids = [tup[0] for tup in ids]
      ids.sort()
      # Chunk size per job; '//' makes the floor division explicit (identical
      # to '/' on Python 2 ints, and Python 3 safe).
      l = len(ids)//jobs
      for i in range(jobs-1):
         tmpfilename = os.path.join(tmpdir, os.path.basename(pedfilename)+'.%s.part%s'%(schema, i))
         pool.add_task(write_trunc_ped_file, session, schema, sm, schemas_filter[k], tmpfilename, allelea, alleleb, ids[i*l], ids[(i+1)*l])
      # The last chunk takes the remainder, up to and including the largest id.
      tmpfilename = os.path.join(tmpdir, os.path.basename(pedfilename)+'.%s.part%s'%(schema, jobs-1))
      pool.add_task(write_trunc_ped_file, session, schema, sm, schemas_filter[k], tmpfilename, allelea, alleleb, ids[(jobs-1)*l], ids[len(ids)-1]+1)
   pool.start()
   # wait on the queue until everything has been processed
   pool.wait_completion()
   # Concatenate the fragments in schema order, then chunk order.
   # Fix: removed a duplicate `import shutil` that used to sit here.
   with open(pedfilename, 'wb') as pf:
      for schema in schemas:
         for i in range(jobs):
            tmpfilename = os.path.join(tmpdir, os.path.basename(pedfilename)+'.%s.part%s'%(schema, i))
            pfi = open(tmpfilename,'rb')
            shutil.copyfileobj(pfi, pf)
            pfi.close()
            os.remove(tmpfilename)
   if os.path.isdir(tmpdir):
      shutil.rmtree(tmpdir)

##############################################################################################

def write_trunc_merged_tped_file(Session, schema, anno_table_schema, schema_filter, pedfilename, allelea, alleleb, begin_id, end_id):
   """Write tped genotype columns for merged-anno ids in [begin_id, end_id).

   Per annotation id, the query aggregates one tab-joined string of decoded
   genotypes over all individuals that survive the idlink/pheno filters,
   ordered by anno_id then idlink_id, and writes one line per annotation to
   pedfilename.
   """
   import os, sys
   from utils import text_autocommit
   # NOTE: E'\t' below contains a literal tab (Python expands \t before the
   # SQL is sent); PostgreSQL treats it as a tab either way.
   gq = text_autocommit("""
   SET search_path TO %(schema)s, public;
   SELECT array_to_string(array_agg(gvals_ord.g), E'\t') AS str
     FROM
     (
     SELECT geno.anno_id, decode_genotype(geno.snpval_id, %(allelea)s, %(alleleb)s) AS g
     FROM    geno
     INNER JOIN %(as)s.anno
     ON      geno.anno_id = anno.id
     INNER JOIN %(schema_filter)s.idlink
     ON      geno.idlink_id = idlink.id
     INNER JOIN %(schema_filter)s.pheno
     ON      idlink.patientid = pheno.patientid
     WHERE geno.anno_id >= %(begin_id)s
     AND geno.anno_id < %(end_id)s
     ORDER BY geno.anno_id, geno.idlink_id
             ) AS gvals_ord
     GROUP BY gvals_ord.anno_id
     ORDER BY gvals_ord.anno_id;
   """%({'schema':schema, "as":anno_table_schema, "schema_filter":schema_filter, 'allelea':allelea, 'alleleb':alleleb, "begin_id":begin_id, "end_id":end_id}))
   genostr = Session.execute(gq).fetchall()
   Session.commit()
   # Each row holds a single aggregated string column.
   f = open(pedfilename, 'w')
   for grow in genostr:
      grow = [str(x) for x in grow]
      f.write("\t".join(grow)+'\n')
   f.close()

# The anno table in anno_table_schema was already joined against the geno table and filtered, so none of that is necessary here.
def write_trunc_merged_anno_file(Session, schemaname, anno_table_schema, filename, begin_id, end_id):
   """Dump annotation lines (chromosome name, rsid, 0, location, tab-joined,
   with a trailing tab) for merged-anno ids in [begin_id, end_id) to
   `filename`, one line per annotation ordered by anno.id."""
   import os, sys
   from utils import text_autocommit
   query = text_autocommit("""
   SET search_path TO %(schemaname)s;
   SELECT               name
                     || E'\t'
                     || rsid
                     || E'\t'
                     || 0
                     || E'\t'
                     || location
                     || E'\t'
   FROM
   (
   SELECT anno.id, name, rsid, location
             FROM     %(as)s.anno
                      INNER JOIN chromo
                      ON      chromosome = chromo.id
             WHERE    anno.id >= %(begin_id)s
                      AND anno.id < %(end_id)s
             ORDER BY anno.id
   ) AS q;
   """%({"schemaname":schemaname,  "as":anno_table_schema, "begin_id":begin_id, "end_id":end_id}))
   rows = Session.execute(query).fetchall()
   Session.commit()
   out = open(filename, 'w')
   for row in rows:
      out.write("\t".join([str(col) for col in row])+'\n')
   out.close()

@print_timing
def write_tped_file_single(session, dbstring, tpedfilename, alleletype, jobs, schemas, rstr):
   """Write a PLINK tped file from single-layout schemas.

   The merged-annotation id range is split into `jobs` chunks; genotype
   fragments (one temp file per schema per chunk) are written in parallel,
   matching annotation fragments are written serially, and the pieces are
   stitched line-by-line (anno columns followed by one genotype block per
   schema) into tpedfilename.
   """
   import os, sys
   from utils import randstr, safe_mkdir
   from dbutils import get_db_type
   from utils import create_engine_wrapper, text_autocommit
   from dbfunctions import create_or_replace_plpgsql, create_decode_genotype_function
   from threadpool import ThreadPool
   dbtype = get_db_type(dbstring)
   if dbtype == 'affy6' and alleletype != None:
         sys.exit("database is marked as 'affy6', so alleletype option not required - you have used %s as alleletype."%alleletype)
   if dbtype == 'illumina':
      if alleletype != 'forward' and alleletype != 'top':
         sys.exit("database is marked as 'illumina', so alleletype must be either 'forward' or 'top', not %s"%alleletype)
   if os.path.exists(tpedfilename) and os.path.getsize(tpedfilename) > 0:
      sys.exit("%s already exists, exiting"%tpedfilename)
   from utils import get_conf
   conf = get_conf()
   tmpdir = os.path.join(conf['tmpdir'], 'tmp_%s'%rstr)
   safe_mkdir(tmpdir)
   allelea, alleleb = get_allelea_alleleb(alleletype)
   print "writing tped file %s..."%(tpedfilename)
   session.execute(create_or_replace_plpgsql)
   session.execute(create_decode_genotype_function)
   sm = '_'.join(schemas) + '_merge' + '_' + rstr
   session.execute("DROP SCHEMA IF EXISTS %(sm)s CASCADE; CREATE SCHEMA %(sm)s;"%{'sm':sm})
   create_merged_anno_table_single(session, dbtype, alleletype, sm, schemas)
   schemas_filter = create_schemas_and_tables_filter(session, dbtype, rstr, schemas)
   session.commit()
   pool = ThreadPool(session, jobs)
   ids = session.execute("select id from %s.anno"%sm).fetchall()
   ids = [id[0] for id in ids]
   ids.sort()
   # NOTE: Python 2 integer division -- l is the chunk size in merged-anno ids.
   l =  len(ids)/jobs
   for k, schema in enumerate(schemas):
      for j in range(jobs-1):
         tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, j))
         pool.add_task(write_trunc_merged_tped_file, session, schema, sm, schemas_filter[k], tmpfilename, allelea, alleleb, ids[j*l], ids[(j+1)*l])
      # The final chunk covers the remainder, up to and including the largest id.
      tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, jobs-1))
      pool.add_task(write_trunc_merged_tped_file, session, schema, sm, schemas_filter[k], tmpfilename, allelea, alleleb, ids[(jobs-1)*l], ids[len(ids)-1]+1)
   pool.start()
   #wait on the queue until everything has been processed
   pool.wait_completion()

   # NOTE(review): `schema` below is left over from the loop above (i.e. the
   # last schema).  The anno rows come from %(as)s.anno, so this presumably
   # only affects the search_path -- confirm any schema is acceptable here.
   for j in range(jobs-1):
      annofilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.anno.part%s'%(j))
      write_trunc_merged_anno_file(session, schema, sm, annofilename, ids[j*l], ids[(j+1)*l])
   annofilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.anno.part%s'%(jobs-1))
   write_trunc_merged_anno_file(session, schema, sm, annofilename, ids[(jobs-1)*l], ids[len(ids)-1]+1)

   import shutil
   tpf = open(tpedfilename, 'w')
   tmpfdict = {}
   # Open every genotype fragment up front; their lines are consumed in
   # lockstep with the annotation lines below.
   for schema in schemas:
      for j in range(jobs):
         tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, j))
         tmpfdict[tmpfilename] = open(tmpfilename,'r')

   # Stitch: for each annotation line, append one genotype block per schema.
   for j in range(jobs-1):
      annofilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.anno.part%s'%(j))
      af = open(annofilename)
      for i in ids[j*l:(j+1)*l]:
         line = af.readline().rstrip() + '\t'
         for schema in schemas[0:len(schemas)-1]:
            tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, j))
            line = line + tmpfdict[tmpfilename].readline().rstrip() + '\t'
         schema = schemas[-1]
         tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, j))
         line = line + tmpfdict[tmpfilename].readline().rstrip()
         line = line + '\n'
         tpf.write(line)
      af.close()
      os.remove(annofilename)

   # Remainder chunk; the slice end (len(ids)+1) simply means "to the end".
   annofilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.anno.part%s'%(jobs-1))
   af = open(annofilename)
   for i in ids[(jobs-1)*l:len(ids)+1]:
      line = af.readline().rstrip() + '\t'
      for schema in schemas[0:len(schemas)-1]:
         tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, jobs-1))
         line = line + tmpfdict[tmpfilename].readline().rstrip() + '\t'
      schema = schemas[-1]
      tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, jobs-1))
      line = line + tmpfdict[tmpfilename].readline().rstrip()
      line = line + '\n'
      tpf.write(line)
   af.close()
   os.remove(annofilename)

   # Close, then delete, all genotype fragments.
   for schema in schemas:
      for j in range(jobs):
         tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, j))
         tmpfdict[tmpfilename].close()
   for schema in schemas:
      for j in range(jobs):
         tmpfilename = os.path.join(tmpdir, os.path.basename(tpedfilename)+'.%s.part%s'%(schema, j))
         os.remove(tmpfilename)
   tpf.close()

   if os.path.isdir(tmpdir):
      shutil.rmtree(tmpdir)

##############################################################

def write_tfam_file_single(session, dbstring, tfamfilename, schemas, rstr):
   """Write a PLINK tfam file: one line per individual (famid, expid, two
   zero parent ids, sex code, phenotype -- tab-separated) for every schema,
   after applying any configured idlink/pheno filters.

   Exits if the output file already exists and is non-empty.
   """
   import os, sys
   from utils import text_autocommit
   from dbutils import get_db_type
   if os.path.exists(tfamfilename) and os.path.getsize(tfamfilename) > 0:
      sys.exit("%s already exists, exiting"%tfamfilename)
   # Fix: dropped an unused engine created via create_engine_wrapper and an
   # unused sessionmaker import; get_db_type works from the dbstring alone.
   dbtype = get_db_type(dbstring)
   print("writing tfam file %s..."%(tfamfilename))
   schemas_filter = create_schemas_and_tables_filter(session, dbtype, rstr, schemas)
   def phenostr(schema, schema_filter):
      # One row per individual: the full tab-separated line is built
      # server-side, so each result row carries a single string column.
      pq = text_autocommit("""
      SET search_path TO %(schema)s, public;
      SELECT
                     pheno.famid
                     || E'\t'
                     || idlink.expid
                     || E'\t'
                     || 0
                     || E'\t'
                     || 0
                     || E'\t'
                     || COALESCE(sex.code::text, '')
                     || E'\t'
                     || COALESCE(pheno.phenotype::text,'')
      FROM %(schema_filter)s.idlink
      INNER JOIN %(schema_filter)s.pheno
      ON      idlink.patientid = pheno.patientid
      INNER JOIN sex
      ON      pheno.sex_id=sex.val
      ORDER BY idlink.id;
      """%({"schema":schema, "schema_filter":schema_filter}))
      pstr = session.execute(pq).fetchall()
      return pstr
   with open(tfamfilename, 'w') as f:
      for k, schema in enumerate(schemas):
         for prow in phenostr(schema, schemas_filter[k]):
            f.write(prow[0]+'\n')
