#!/usr/bin/env python

import sys
import psycopg2
import os
import argparse
import re
import sblab
import traceback
import xlrd
import datetime
import warnings

# Command-line interface. NOTE: help-text typos fixed (errors/foreign/default);
# option names, defaults and behaviour are unchanged.
parser = argparse.ArgumentParser(description= """
DESCRIPTION

    Read a sample sheet and import it to sblab.
    Typically the tables to upload will be: samples, exp_design, libraries, solexa_lims.
    Blank lines and comment lines (#) are skipped.
    The sample sheet should have a header line with column names formatted as
    <tablename>.<columnname> (e.g. solexa_lims.service_id)
    
    If the data relative to a table are already in the database, that data is not
    imported (to avoid primary key errors). This allows adding libraries to samples
    or samples to solexa_lims.services. In fact, uploading the same sample sheet
    twice, will result in no inserts at all the second time (and no errors thrown)
    
    Marginal support for Excel:
        - Make sure samplesheet is in the first sheet
        - For NULL values use some string (e.g. NA) and set the -n option (e.g. -n NA)
        - Do not leave blank columns between data columns
NOTES:
    - Only tables found in filing_order with "filing_order" > 0 and not null will be imported.
    Tables will be imported in the order given by "filing_order". It should not be essential
    to establish a correct import order as foreign keys and constraint triggers should
    all be deferred.

TODO
    - Set correct default for libraries.library_id when using sample_id. E.g.
      If sample_id is DB001 library_id should become db001. 
    - If none of the columns relative to a table are in the sample sheet, don't
    consider that table at all. I.e. don't try to set default or insert any rows.
    (e.g. if the sample sheet contains samples and libraries but no service_id for
    solexa_lims). NB: It can be messy to upload partial data!
    - Remove blank columns the same way blank rows are removed
    
UNIT TEST

echo -e "## Test data for read_samplesheet.py
#  ---------------------------------
contacts.contact_name\tcontacts.initials\tsamples.sample_id\tsamples.source_name\tsamples.contact\tsamples.organism\tsamples.molecule\tsample_sources.source_name
Martufello\tmf\tmf001\tBreast cancer\tMartufello\tHomo sapiens\tgenomic DNA\tBreast cancer
" > /tmp/read_samplesheet_utest.txt

read_samplesheet.py -s /tmp/read_samplesheet_utest.txt --nocommit
rm /tmp/read_samplesheet_utest.txt
    
    """, formatter_class= argparse.RawTextHelpFormatter)

parser.add_argument('--samplesheet', '-s',
                   required= True,
                   help='''Sample sheet to import, plain text tab separated.
See a template sample sheet for detailed format.
                   ''')

parser.add_argument('--nullstring', '-n',
                   required= False,
                   default= None,
                   help='''Character string for missing values that will be inserted as NULL. E.g.
use '' for empty string as null. Default is None meaning that NULL values are never inserted.
                   ''')

parser.add_argument('--nocommit',
                   action= 'store_true',
                   help='''Don't commit changes to database. I.e. just test
whether the sample sheet can be imported without problems
                   ''')

args = parser.parse_args()

# ------------------------------------------------------------------------------

def read_txt(infile):
    """Read a plain-text, tab-separated sample sheet.

    infile:
        Path of the file to read.

    Returns a list of lists where each inner list is one row, split on tabs.
    NB: this function and read_xls() must return the same output format!
    """
    ## Context manager closes the handle even on error; the previous version
    ## leaked the file object and used the deprecated 'U' open mode.
    with open(infile) as fh:
        content = fh.read()
    ## splitlines() recognises \n, \r\n and \r terminators (and strips them),
    ## matching the old 'U' (universal newlines) behaviour.
    ssheet= [line.split('\t') for line in content.splitlines()]
    return(ssheet)

def read_xls(infile):
    """Open an Excel file and read the *first* sheet containing the samplesheet.

    infile:
        Path of the Excel workbook.

    Returns a list of lists (one inner list per row) converting cell types:
    strings stay strings, whole-number floats become ints, dates become
    datetime.datetime, booleans become True/False, empty cells are kept as-is.
    Output is (MUST BE) the same format as read_txt().
    NB: need xlrd 0.8 or higher for Excel 2007 or later.
    """
    wb = xlrd.open_workbook(infile)
    sh = wb.sheet_by_index(0)
    ssheet= []
    for rownum in range(sh.nrows):
        row= []
        row_values= sh.row_values(rownum)
        ## For type codes see the xlrd documentation (Cell class).
        row_types= sh.row_types(rownum)
        ## `ctype` avoids shadowing the builtin `type`.
        for value, ctype in zip(row_values, row_types):
            if ctype == 1:
                ## String
                row.append(str(value))
            elif ctype == 2:
                ## Number: xlrd returns floats; keep integers as int.
                if int(value) == value:
                    row.append(int(value))
                else:
                    row.append(value)
            elif ctype == 3:
                ## Date: convert the Excel serial date to datetime.
                cell_date= xlrd.xldate_as_tuple(value, 0)
                row.append(datetime.datetime(cell_date[0], cell_date[1], cell_date[2], cell_date[3], cell_date[4], cell_date[5]))
            elif ctype == 4:
                ## Boolean: stored as 1/0.
                row.append(value == 1)
            elif ctype == 0:
                ## Empty cell: keep as-is (empty string).
                row.append(value)
            else:
                ## Error/blank codes (5, 6) are not supported.
                sys.exit('Unexpected data type')
        ssheet.append(row)
    return(ssheet)
    
def quoter(x):
    ''' Quote string x for use in an SQL statement as a column or table name.

    Embedded double quotes are escaped by doubling them, per the SQL standard.

    EXAMPLES:
        >>> quoter('x')
        '"x"'
        >>> quoter('x"x')
        '"x""x"'
        >>> quoter('x"x"')
        '"x""x"""'
    '''
    escaped= x.replace('"', '""')  ## Double any embedded double quote
    return('"%s"' % escaped)

def nullify(rowlist, null):
    """Replace every occurrence of the string *null* in rowlist with None,
    so psycopg2 inserts it in the target table as NULL.

    rowlist:
        List of values (one database row).
    null:
        Placeholder string that stands for a missing value.

    Returns a new list; rowlist is not modified.

    EXAMPLE:
        rowlist= ['SLX-1000', '', 'SLX-2000']
        null= ''
        nullify(rowlist, null) #=> ['SLX-1000', None, 'SLX-2000']
    """
    ## Exact equality only: values merely containing `null` are kept.
    return([None if x == null else x for x in rowlist])

def transposed(lists):
    """Given a list of lists representing a table, transpose rows with columns.

    lists:
        List of lists; each inner list is one row.

    Returns a list of lists where each inner list is a column of the input.
    Note how incomplete rows are padded with None.
    E.g.
    >>> x= [['a', 'b', 'c'],
             [1,   2,   3],
             [4,   5,   6],
            ['x', 'v']]

    >>> transposed(x)
    [['a', 1, 4, 'x'],
     ['b', 2, 5, 'v'],
     ['c', 3, 6, None]]

    NOTE(review): the None-padding relies on Python 2 map() semantics, where
    map() with a multi-argument function pads shorter iterables with None and
    returns a list. Under Python 3 map() would truncate at the shortest row
    and return an iterator -- confirm the interpreter version before porting.

    Function found http://code.activestate.com/recipes/410687/
    """    
    if not lists:
        return([])
    return(map(lambda *row: list(row), *lists))

def remove_empty_tables(tables, table_dict):
    """Drop from *tables* every table with no associated column in table_dict.

    tables:
        List of table names to import.
    table_dict:
        Dict like {'table.column': [column data]}, e.g.
        {'solexa_lims.service_id': ['SLX-1000', 'SLX-1001', ...], }

    A table can end up with no columns when all of them were blank and have
    been removed; such tables are filtered out. Input order is preserved.

    E.g.
    tables= ['solexa_lims', 'libraries', 'samples']
    table_dict= {'libraries.library_id': ['mylib'], 'samples.sample_id': ['mysample']}
    remove_empty_tables(tables, table_dict) >>> ['libraries', 'samples']
    """
    return([tbl for tbl in tables
            if any(col.startswith(tbl + '.') for col in table_dict.keys())])


def import_to_table(table, table_dict):
    """
    Prepare and execute the create & insert statements to import the columns in table.

    ARGS:
        table: The target database table to import to.
        table_dict: A dictionary containing the data to import. Example format is:
                    
                    {'solexa_lims.service_id': ['SLX-1000', 'SLX-1001', ...], 'samples.sample_id': ['db001', 'db002', ...]}
                    
                    import_to_table will extract the relevant keys (columns) by matching the table in <table> with the key name.

    NOTE(review): relies on the module-level psycopg2 cursor `cur` and on
    `args.nullstring`. Nothing is committed here; the caller decides whether
    to commit or roll back the transaction.

    EXAMPLE:
        import_to_table('solexa_lims', {'solexa_lims.service_id': ['SLX-1000', 'SLX-1001', ...], ...})
    """
    print('\n-- Importing to table: "%s"' %(table,))
    table_dict= table_dict  ## NOTE(review): no-op self-assignment, kept as-is
    table_dot= table + '.'
    table_tmp= table + '_tmp'
    cols= [x for x in table_dict.keys() if x.startswith(table_dot)] ## From the column names in sample sheet, get those starting with <table.>.
    ## NOTE(review): replace() acts on the whole string, not just the prefix;
    ## safe as long as a column name never embeds '<table>.' again.
    cols= [x.replace(table_dot, '') for x in cols] ## Get the column name. E.g. from 'solexa_lims.service_id' >>> 'service_id'
    rows= []
    for k in cols:
        "Get the data that will go in <table>. List of lists where each sublist is a *column* of data"
        rows.append(table_dict[table_dot + k])
    
    ## Put temporarily here the data. They can't go straight to the destination table because duplicate
    ## rows have to be removed.
##    sql= 'CREATE TEMP TABLE ' + table_tmp + ' (LIKE ' + table + ' INCLUDING DEFAULTS);'
    sql= 'CREATE TEMP TABLE ' + table_tmp + ' AS (SELECT * FROM ' + table + ' WHERE 1 = 2);'
    # print(cur.mogrify(sql))
    cur.execute(sql)
    
    ## Now dynamically prepare the SQL for psycopg2 which is something like
    ## 'INSERT INTO solexa_lims (service_id, refdate) VALUES (%s, %s);'
    s_string= '%s, '*len(cols) ## Repeat %s for cur.execute()
    s_string= s_string[:-2] ## Remove last comma
    colstring= ', '.join([quoter(x) for x in cols]) ## Column names to plug into sql
    sql= 'INSERT INTO ' + table_tmp + '(' +  colstring + ') VALUES (' + s_string + ');'
    for i in range(0, len(rows[0])):
        ## Insert one row at a time. Values go through psycopg2 placeholders,
        ## so they are properly escaped.
        row= [x[i] for x in rows]
        if args.nullstring is not None:
            row= nullify(row, args.nullstring)
        # print(cur.mogrify(sql, row))
        cur.execute(sql, row)
    ## Now insert into the the real table from the tmp table. Note that rows already in target table are excepted:
    sqlexcept= 'SELECT DISTINCT ' + colstring + ' FROM ' + table_tmp + ' EXCEPT SELECT ' + colstring + ' FROM ' + table
    cur.execute(sqlexcept)
    nrows= cur.rowcount
    # --------------------
    # Only for diagnostics: show the rows about to be inserted.
    sqldata= cur.fetchall()
    header= [x[0] for x in cur.description]
    sblab.prettyPrintTable(sorted(sqldata), header)
    # -------------------
    sql= 'INSERT INTO ' + table + '(' + colstring + ') ' + sqlexcept + ';'
    # print(cur.mogrify(sql))
    cur.execute(sql)
    sql= 'DROP TABLE %s;' %(table_tmp) ## Remove tmp table
    #print(cur.mogrify(sql))
    #print('-- %s rows inserted.' %(nrows))
    cur.execute(sql)

def transposed2(lists, defval=''):
    """Transpose a list of lists, padding short rows with *defval*.
    See http://code.activestate.com/recipes/410687-transposing-a-list-of-lists-with-different-lengths/

    lists:
        List of lists; each inner list is one row.
    defval:
        Value used in place of the None produced by padding (default '').

    seqbag = [[1, 2, 3], [4, 5, 6, 7], [8, 9]]
    transposed2(seqbag)
    >>> [[1, 4, 8], [2, 5, 9], [3, 6, ''], ['', 7, '']]

    NOTE(review): the padding itself relies on Python 2 map() semantics
    (shorter iterables padded with None); Python 3 map() would truncate
    instead -- confirm before porting.
    """
    if not lists:
        return []
    ## Bug fix: only replace real None padding with defval. The previous
    ## `elem or defval` also clobbered legitimate falsy cell values such as
    ## 0, 0.0 and False.
    return(list(map(lambda *row: [defval if elem is None else elem for elem in row], *lists)))

def remove_blank_columns(lists, blank= ''):
    """Drop every column whose values are all equal to *blank*.

    lists:
        List of lists. Inner lists are *rows*.
    blank:
        String to consider 'blank'

    mylist= [[1, '', 2, 'NULL'], [3, '', 4, 'NULL'], [5, '', 6, 'NULL']]

    remove_blank_columns(mylist)
    >>> [[1, 2, 'NULL'], [3, 4, 'NULL'], [5, 6, 'NULL']]
    remove_blank_columns(mylist, 'NULL')
    >>> [[1, '', 2], [3, '', 4], [5, '', 6]]
    """
    ## Transpose so columns become rows, keep the ones that are not
    ## uniformly blank, then transpose back.
    blank_set= set([blank])
    kept_columns= [col for col in transposed2(lists) if set(col) != blank_set]
    return(transposed2(kept_columns))

def remove_blank_rows(lists, blank= ''):
    """Drop rows where every value equals *blank* from a list of lists
    (as typically returned by read_xls or read_txt).

    See also:
        remove_blank_columns()
    """
    blank_set= set([blank])
    return([row for row in lists if set(row) != blank_set])

def remove_comment_rows(lists, comment= '#'):
    """ Remove comment rows from list of lists (as typically returned by read_xls or read_txt)

    A row is a comment if the first non blank character of the concatenated
    row is *comment*.

    lists:
        List of lists; inner lists are rows.
    comment:
        String marking a comment row (default '#').

    Returns a new list with the comment rows removed.
    """
    list_clean= []
    for row in lists:
        ## Join all the cells so leading blank cells don't hide the marker.
        line= ''.join([str(x) for x in row]).strip()
        ## Bug fix: honour the `comment` argument instead of a hard-coded '#'.
        if not line.startswith(comment):
            list_clean.append(row)
    return(list_clean)

def exclude_blank_fields(table_dict, blank= ''):
    """Remove fields (columns) where all the values are blank.

    For example if column samples.description is all blank, remove it from the
    table.

    table_dict:
        Dictionary of <table.column>(keys) and list of values. E.g.
        {'samples.sample_id': ['a', 'b', ...], 'libraries.library_id': ['x', 'y', ...]}
    blank:
        String to consider 'blank'.

    Returns the same dictionary (modified in place) with the key-value pairs
    whose list of values is entirely *blank* removed.
    """
    ## Materialize the key list before deleting: deleting from a dict while
    ## iterating a live keys() view raises RuntimeError on Python 3 (on
    ## Python 2 keys() returned a detached list, which is why it worked).
    for k in list(table_dict.keys()):
        if set(table_dict[k]) == set([blank]):
            del(table_dict[k])
    return(table_dict)
    
# ------------------------------------------------------------------------------

# Warn when xlrd is too old to read .xlsx (Excel 2007+) workbooks.
xlrd_version= xlrd.__VERSION__.split('.')
if int(xlrd_version[0]) < 1 and int(xlrd_version[1]) < 8:
    warnings.warn('\n\nxlrd version is < 0.8.0 (%s)\nExcel files 2007+ will not be read!\n\n' %(xlrd.__VERSION__))

# Try Excel first; if xlrd cannot parse the file, assume it is plain text.
try:
    ssheet= read_xls(args.samplesheet)
except xlrd.biffh.XLRDError:
    ssheet= read_txt(args.samplesheet)
# Clean up: drop all-blank columns and rows, then rows starting with '#'.
ssheet= remove_blank_rows(remove_blank_columns(ssheet))
ssheet= remove_comment_rows(ssheet)

# The sheet must be rectangular after cleaning, otherwise the column/row
# pairing below would silently misalign data.
if len(set([len(x) for x in ssheet])) != 1:
    sys.exit('\nNot all rows have the same number of columns: %s \n\nExiting...\n' %([len(x) for x in ssheet]))

## Remove duplicate columns if any
#ssheet_trans= transposed(ssheet)
#ssheet_nodup= []
#for x in ssheet_trans:
#    if x not in ssheet_nodup:
#        ssheet_nodup.append(x)
#ssheet= transposed(ssheet_nodup)

# ------------------------------------------------------------------------------
# Convert sample sheet to dictionary:
# ------------------------------------------------------------------------------

# The first row is the header of <table>.<column> names; the rest is data.
colnames= ssheet[0]
if len(colnames) != len(set(colnames)):
    print(colnames)
    sys.exit('\nDuplicate column names found in %s\n' %(args.samplesheet))
for x in colnames:
    ## strip('.') removes leading/trailing dots so names like 'samples.'
    ## (dot but no real column part) are rejected as well.
    if '.' not in x.strip('.'):
        sys.exit('\nUnexpected column name: "%s".\nNames should have the format <table name>.<column name>\n' %(x))
data= ssheet[1:]

# Map each header name to its column of data, then drop all-blank columns.
table_dict= {}
for i in range(0, len(colnames)):
    col= [x[i] for x in data]
    table_dict[colnames[i]]= col
table_dict= exclude_blank_fields(table_dict)

## See which table have to be uploaded.
## The expected format of the column names is <table>.<column>. Get the string
## before the dot and skip colnames without a dot.
tables_unordered= [x.split('.')[0] for x in colnames if '.' in x]
tables_unordered= list(set(tables_unordered)) ## This is a list of tables as extracted from the header of the sample sheet.

# ------------------------------------------------------------------------------
# Set correct import order
# ------------------------------------------------------------------------------

# Connection credentials come from sblab; `cur` is used as a module-level
# cursor (also inside import_to_table).
conn= psycopg2.connect(sblab.get_psycopgpass())
cur= conn.cursor()

# Refresh the filing_order table server-side before reading it.
sql= 'SELECT update_filing_order();'
#print(cur.mogrify(sql))
cur.execute(sql)
sql= 'SELECT table_name FROM filing_order WHERE filing_order > 0 AND filing_order is not null ORDER BY filing_order'
#print(cur.mogrify(sql))
cur.execute(sql)
forder= [x[0] for x in cur.fetchall()] ## This is a list of table names from filing_order
tables= []  ## Tables in sample sheet will be ordered according to filing_order.
for t in forder:
    if t in tables_unordered:
        tables.append(t)
# Drop tables that ended up with no columns (all-blank columns were removed).
nonempty= remove_empty_tables(tables, table_dict)
tables= [x for x in nonempty]

for t in tables_unordered:
    "Check all tables in sample sheet exist in filing_order"    
    if t not in forder:
        sys.exit('\nTable "%s" not found in "filing_order" (where filing_order.filing_order > 0).\n' %(t))

# ------------------------------------------------------------------------------
# Set defaults
# ------------------------------------------------------------------------------
# Fill in columns that can be derived from the sample IDs so the sample
# sheet does not have to repeat them.
if 'samples' in tables:
    if 'samples.sample_id' in table_dict.keys():
        "Capitalize initials"
        sname= table_dict['samples.sample_id']
        table_dict['samples.sample_id']= [sblab.convert_id(x, 'to_sample') for x in sname]
if 'libraries' in tables:
    if 'libraries.library_id' not in table_dict.keys() and 'samples.sample_id' in table_dict.keys():
        "Create library_id given the sample_id ->   Lowercase initials"
        sname= table_dict['samples.sample_id']
        table_dict['libraries.library_id']= [sblab.convert_id(x) for x in sname]
    if 'libraries.sample_id' not in table_dict.keys() and 'samples.sample_id' in table_dict.keys():
        table_dict['libraries.sample_id']= table_dict['samples.sample_id']
    if 'libraries.contact' not in table_dict.keys() and 'samples.contact' in table_dict.keys():
        table_dict['libraries.contact']= table_dict['samples.contact']
if 'fastqfiles' in tables:
    ## NOTE(review): this raises KeyError when the sheet has fastqfiles
    ## columns but no libraries.library_id (and none was derived above) --
    ## confirm whether that combination can occur in practice.
    if 'fastqfiles.library_id' not in table_dict.keys():
        table_dict['fastqfiles.library_id']= table_dict['libraries.library_id']

# ------------------------------------------------------------------------------
# Some checks
# ------------------------------------------------------------------------------

"""

"""

# ------------------------------------------------------------------------------
# Start importing
# ------------------------------------------------------------------------------

print('\nTables found (they will be updated in this order):')
print('-'*60)
for t in tables:
    print(t)
print('-'*60)

for t in tables:
    ## Import *all* tables excluded those below (many-to-many excluded)
    if t in ['exp_design', 'projects', 'project_samples', 'lib2seq', 'fastq2bam']:
        continue
    else:
        import_to_table(t, table_dict)

# ------------------------------------------------------------------------------
# Update many-to-many tables
# ------------------------------------------------------------------------------
## project_samples: link each sample to its project, row by row.
if ('samples' in tables) and ('projects' in tables):
    m2m_dict= {'project_samples.project': table_dict['projects.project'], 'project_samples.sample_id': table_dict['samples.sample_id']}
    import_to_table('project_samples', m2m_dict)

## lib2seq: link each library to its sequencing service.
#if ('libraries' in tables) and ('samples' in tables):
#    m2m_dict= {'lib2seq.library_id': table_dict['libraries.library_id'], 'lib2seq.service_id': table_dict['solexa_lims.service_id']}
#    import_to_table('lib2seq', m2m_dict)
if ('libraries' in tables) and ('solexa_lims' in tables):
    m2m_dict= {'lib2seq.library_id': table_dict['libraries.library_id'], 'lib2seq.service_id': table_dict['solexa_lims.service_id']}
    import_to_table('lib2seq', m2m_dict)

## fastq2bam: link each fastq file to its bam file.
if ('fastqfiles' in tables) and ('bamfiles' in tables):
    m2m_dict= {'fastq2bam.fastqfile': table_dict['fastqfiles.fastqfile'], 'fastq2bam.bamfile': table_dict['bamfiles.bamfile']}
    import_to_table('fastq2bam', m2m_dict)

# ------------------------------------------------------------------------------
# Parse and upload to exp_design
# ------------------------------------------------------------------------------
# The column names starting with exp_design.<variable> have to be normalized
#
# ---- FROM (wide format, one variable per column):
# exp_design.cell_line  exp_design.chip_target    exp_design.drug   samples.sample_id
# MCF-7                 foxm1                     DMSO              ds001
# MCF-7                 foxm1                     TS                ds002
# MCF-7                 foxm1                     DMSO              ds003
#
# ---- TO (long/normalized format, one variable per row):
# sample_id   s_value   s_variable
# ds001       MCF-7     cell_line
# ds001       foxm1     chip_target
# ds001       DMSO      drug
# ds002       MCF-7     cell_line
# ds002       foxm1     chip_target
# ds002       TS        drug
# ds003       MCF-7     cell_line
# ds003       foxm1     chip_target
# ds003       DMSO      drug
#
# ---- TO:
# exp_design= {'exp_design.sample_id': ['ds001', 'ds001', 'ds001', 'ds002', ...], 'exp_design.s_value': ['MCF-7', 'foxm1', ...], 'exp_design.s_variable': ['cell_line', ...]}
if 'exp_design' in tables:
    try:
        nrows= len(table_dict['samples.sample_id'])
    except KeyError:
        traceback.print_exc(file=sys.stdout)
        print('-'*60)
        sys.exit("""It appears that there are experimental variables (columns named exp_design.<exp var>) but no column samples.sample_id.
Please include a column of sample IDs under samples.sample_id to link samples to experimental variables
                 """)
            
    nexp= len([x for x in table_dict.keys() if x.startswith('exp_design.')])
    
    # Normalize values (wide -> long): one value per (sample, variable) pair.
    ## NOTE(review): the variable order must be the same here and in the
    ## s_variable block below; both iterate table_dict.keys(), whose order is
    ## stable between calls on an unmodified dict.
    s_value= []
    for i in range(0, nrows):
        for c in [k for k in table_dict.keys() if k.startswith('exp_design.')]:
            s_value.append(table_dict[c][i])
    keep_idx= []
    for i in range(0, len(s_value)):
        " Keep only values which are not missing "
        if s_value[i] != '':
            keep_idx.append(i)
    s_value= [s_value[i] for i in keep_idx]
    
    # Normalize sample_ids [ds001, ds001, ..., ds002, ds002, ..., ds00n, ds00n]
    sample_idx= [[x] * nexp for x in table_dict['samples.sample_id']]
    sample_id= []
    for x in sample_idx:
        sample_id = sample_id + x
    sample_id= [sample_id[i] for i in keep_idx]
    
    # Normalize variables [var1, var2,...varN, ..., var1, var2,..., varN]
    s_variable= [x.replace('exp_design.', '') for x in table_dict.keys() if x.startswith('exp_design.')] * nrows
    s_variable= [s_variable[i] for i in keep_idx]
    
    exp_design= {'exp_design.sample_id': sample_id, 'exp_design.s_variable': s_variable, 'exp_design.s_value': s_value}
    
    # Check all the variables and values are already in the database:
    cur.execute('SELECT s_variable FROM exp_variables')
    db_vars= [x[0] for x in cur.fetchall()]
    cur.execute('SELECT s_variable, s_value FROM exp_values')
    db_vals= cur.fetchall()
    exp_design__exp_values= [str(x) for x in exp_design['exp_design.s_value']] ## Convert all values to string because exp_design.s_value is text. This is probably going to screw up excel dates.
    missing_vars= list(set([x for x in s_variable if x not in db_vars]))
    missing_vals= list(set([x for x in zip(exp_design['exp_design.s_variable'], exp_design__exp_values) if x not in db_vals]))
    if missing_vars != [] or missing_vals != []:    
        if missing_vars != []:
            print('\nThe following experimental variables could not be found in table "exp_variables":\n%s\n' %(missing_vars))
        if missing_vals != []:
            print('\nThe following experimental variables/values could not be found in table "exp_values":\n%s\n' %(missing_vals))
        sys.exit("""Please correct the samples sheet "%s" or update the tables
    "exp_variables" and "exp_values" to include these variables/values\n""" %(args.samplesheet))
    
    import_to_table('exp_design', exp_design)

# Commit or roll back the whole transaction depending on --nocommit.
if not args.nocommit:
    print('\nCommit changes\n')
    conn.commit()
else:
    print('\nChanges have not been commited\n')
    conn.rollback()
    
cur.close()
conn.close()

sys.exit()
