import redi_utils as utils
import redi_sqldb as sqldb

def generate_stats(dataset_id, statsType, targetA_id, targetB_id, config, options):
    """Generate a summary-statistics SQL table for a feature matrix.

    statsType selects what is aggregated from targetA's table:
      features    - COUNT/AVG/STD of VALUE grouped by FEATURE_ID
      samples     - COUNT/AVG/STD of VALUE grouped by SAMPLE_ID
      annotations - AVG/STD of VALUE grouped by the annotation values in
                    targetB's table (targetB_id required); targetB["type"]
                    decides whether the join is on FEATURE_ID or SAMPLE_ID

    The CREATE TABLE statement is executed through sqldb, and the new table
    is then recorded in the dataset's JSON metadata under "statistics".

    Exits the process with -1 on any usage/validation error.

    NOTE(review): this function uses a module-level `dataset` that is not
    imported at the top of this file -- verify it is in scope.
    """
    if targetA_id is None:
        print "===> ERROR: Specify target for generating statistics "
        print "Usage: redi " + dataset_id + " generate-stats " + statsType + " FEATURE_MATRIX_ID [ANNOTATIONS_ID]"
        exit(-1)

    sqlcmds = []
    json_statistics = { "id": "STATS_" + targetA_id, "type": statsType }

    if statsType == "features" or statsType == "samples":
        # The two per-axis stats differ only in the GROUP BY column and the
        # table-name suffix; share one code path instead of duplicating it.
        targetA = dataset.lookup_item(dataset_id, targetA_id)
        if statsType == "features":
            groupcol, suffix = "FEATURE_ID", "_STATS_BY_FEATURE"
        else:
            groupcol, suffix = "SAMPLE_ID", "_STATS_BY_SAMPLE"

        newtablename = targetA["table"] + suffix
        json_statistics["table"] = newtablename
        json_statistics["label"] = newtablename

        # NOTE(review): table/column names are concatenated directly into
        # SQL; acceptable only because dataset metadata is trusted input.
        sqlcmds.append("CREATE TABLE " + newtablename + " AS SELECT " + groupcol
                       + ", COUNT(*) AS SAMPLE_COUNT, AVG(VALUE) AS MEAN_VALUE, STD(VALUE) AS STD_VALUE FROM "
                       + targetA["table"] + " GROUP BY " + groupcol)

    elif statsType == "annotations":
        # targetA_id was already validated above; only targetB_id needs checking here.
        if targetB_id is None:
            print "===> ERROR: Specify targets for generating statistics "
            print "Usage: redi " + dataset_id + " generate-stats annotations FEATURE_MATRIX_ID ANNOTATIONS_ID"
            exit(-1)

        targetA = dataset.lookup_item(dataset_id, targetA_id)
        targetB = dataset.lookup_item(dataset_id, targetB_id)
        if "type" not in targetB:
            print "===> ERROR: Annotation target [" + targetB_id + "] does not specify annotation type (samples | features)"
            exit(-1)

        annotationsType = targetB["type"]
        if annotationsType == "features":
            alias, joincol = "F", "FEATURE_ID"
        elif annotationsType == "samples":
            alias, joincol = "S", "SAMPLE_ID"
        else:
            print "===> ERROR: Unknown annotations type for target [" + targetB_id + "," + annotationsType + "]"
            exit(-1)

        # NOTE(review): the original code used the "_STATS_BY_F_ANNOTATION_"
        # suffix for BOTH annotation types (likely a copy-paste slip for
        # "samples"); preserved here so existing table names do not change.
        newtablename = targetA["table"] + "_STATS_BY_F_ANNOTATION_" + targetB_id
        json_statistics["table"] = newtablename
        json_statistics["label"] = newtablename

        sqlcmds.append("CREATE TABLE " + newtablename
                       + " AS SELECT A.VALUE AS ANNOTATED_VALUE, AVG(" + alias + ".VALUE) AS MEAN_VALUE, STD("
                       + alias + ".VALUE) AS STD_VALUE FROM " + targetA["table"] + " " + alias + ", "
                       + targetB["table"] + " A WHERE " + alias + "." + joincol + " = A." + joincol
                       + " GROUP BY A.VALUE")

    else:
        print "===> ERROR: Specify supported stats to generate"
        print "Usage: redi " + dataset_id + " generate-stats (features | samples | annotations) FEATURE_MATRIX_ID [ANNOTATIONS_ID]"
        exit(-1)

    sqldb.execute(dataset_id, sqlcmds, config, options)

    print "Updating Dataset to include generated statistics"
    dataset.write_properties(json_statistics, options)

    # Register the generated stats table in the dataset's JSON document.
    json_data = dataset.load(dataset_id)
    if "statistics" not in json_data: json_data["statistics"] = []
    json_data["statistics"].append(json_statistics)

    dataset.save(dataset_id, json_data)

def import_alias(dataset_id, config, options):
    """Import a local TSV alias file into a new ALIAS_<id> SQL table.

    Reads the header line of options.targetFile to build one VARCHAR(100)
    column per tab-separated field, creates the table, bulk-loads the file
    with LOAD DATA LOCAL INFILE (skipping the header row), copies the file
    into the dataset's alias directory, and records the alias in the
    dataset's JSON metadata under "alias".

    Exits the process with -1 when no input file is given.

    NOTE(review): this function uses a module-level `dataset` that is not
    imported at the top of this file -- verify it is in scope.
    """
    filename = options.targetFile
    if filename is None:
        print "===> ERROR: Specify path to file for import"
        print "Usage: redi " + dataset_id + " import-alias -f /local/path/to/file.tsv"
        exit(-1)

    # Table suffix: explicit -id option, otherwise derived from the filename.
    tablesuffix = options.id
    if tablesuffix is None: tablesuffix = utils.get_filename(filename)

    tablename = "ALIAS_" + tablesuffix

    # Build the column list from the header line. The original code leaked
    # the file handle and crashed with StopIteration on an empty file; the
    # context manager and next() default fix both.
    columnssql = ""
    with open(filename) as rfile:
        columnheaders = next(rfile, "")
    if columnheaders != "":
        columnlist = columnheaders.rstrip().split("\t")
        for i, column in enumerate(columnlist):
            if i > 0: columnssql += ", "
            # Spaces in header names are not valid SQL identifiers.
            columnssql += column.rstrip().replace(" ", "_") + " VARCHAR(100) "

    # NOTE(review): filename and identifiers are concatenated directly into
    # SQL; acceptable only because this runs on trusted local input.
    sqlcmds = []
    sqlcmds.append("CREATE TABLE " + tablename + " (" + columnssql + ")")
    sqlcmds.append("LOAD DATA LOCAL INFILE '" + filename + "' INTO TABLE " + tablename + " FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n' IGNORE 1 LINES")

    sqldb.execute(dataset_id, sqlcmds, config, options)

    print "Copying data into dataset " + dataset_id + "/alias directory"
    utils.copy_file(dataset_id + "/alias", filename)

    print "Updating Dataset to include imported feature"
    json_alias = { "id": tablesuffix, "type": "alias", "table": tablename, "label": tablesuffix }
    dataset.write_properties(json_alias, options)

    # Register the imported alias in the dataset's JSON document.
    json_data = dataset.load(dataset_id)
    if "alias" not in json_data: json_data["alias"] = []
    json_data["alias"].append(json_alias)

    dataset.save(dataset_id, json_data)