#!/usr/bin/env python

import redi_dataset as dataset
import redi_sqldb as sqldb
import redi_neo4j as neo4j
import redi_utils as utils
import redi_messages as please 
import redi_associations as associations
import redi_featurematrix as featurematrix
import redi_annotations as annotations
import redi_references as references
import redi_features as features
import redi_samples as samples

from optparse import OptionParser
import ConfigParser

import sys
import os

try: import json #python 2.6 included simplejson as json
except ImportError: import simplejson as json

def settings(dataset_id, options):
    """Apply command-line properties to a dataset or to one item inside it.

    If options.id is given, properties are written onto that item; otherwise
    they are written onto the dataset's top-level metadata.  The updated
    metadata is then saved back.
    """
    # Initialize an empty dataset only when one does not already exist.
    # NOTE(review): the original condition lacked the `not`, which would
    # overwrite an EXISTING dataset with {} before loading it — confirm
    # utils.dataset_exists returns True for existing datasets.
    if not utils.dataset_exists(dataset_id):
        dataset.save(dataset_id, {})

    json_data = dataset.load(dataset_id)

    itemid = options.id
    if itemid is not None:
        # Target a single item: look it up and write the properties onto it.
        item, json_data = dataset.lookup_item(dataset_id, itemid)
        dataset.write_properties(item, options)
    else:
        # No item id: properties apply to the dataset as a whole.
        dataset.write_properties(json_data, options)

    dataset.save(dataset_id, json_data)

# Data types that `import` knows how to load; each maps to a redi_* module.
import_data_types = ["features", "samples", "annotations", "references", "associations", "featurematrix"]
# Usage-line template: filled with (dataset_id, data_type) for help messages.
import_data_usage = "\tredi %s import %s -f /local/path/to/file.tsv"

def import_item(dataset_id, args, config, options):
    """Import a data file of a supported type into the dataset.

    args[2] must name one of import_data_types and options.targetFile must
    point at the local file to import.  The loaded item is appended to the
    dataset's JSON metadata, and bulk-loaded into the SQL and/or graph
    stores when the loader flags it with "table"/"graph".
    """
    # One source of truth for both validation and dispatch, replacing the
    # membership test plus if/elif chain.
    loaders = {
        "features": features.load,
        "samples": samples.load,
        "annotations": annotations.load,
        "references": references.load,
        "associations": associations.load,
        "featurematrix": featurematrix.load,
    }

    # Example usage lines shown by every error path below.
    usages = [import_data_usage % (dataset_id, idt) for idt in import_data_types]

    if len(args) <= 2:
        please.say("The magic module for guessing data types is unavailable", "Why don't you try one of these instead?", *usages)
        exit(-1)

    item_type = args[2]
    if item_type not in loaders:
        please.say("Use a supported data type:", "   " + ", ".join(import_data_types),
                    "For instance...", *usages)
        exit(-1)

    # Ensure the per-type directory exists under the dataset directory.
    target_dir = dataset_id + "/" + item_type
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    imported_file = options.targetFile
    if imported_file is None:
        please.say("The module that guesses where data is stored has yet to be implemented", "Why don't you try this instead?",
                import_data_usage % (dataset_id, item_type))
        exit(-1)

    # Loaders return a dict describing the imported item.
    imported_item = loaders[item_type](dataset_id, imported_file, config, options)

    json_data = dataset.load(dataset_id)
    if item_type not in json_data:
        json_data[item_type] = []

    # Optional bulk loads into the relational and graph back-ends, driven
    # by flags the loader set on the item.
    if "table" in imported_item:
        sqldb.bulk_load(dataset_id, imported_item, config, options)
    if "graph" in imported_item:
        neo4j.bulk_load(dataset_id, imported_item, config, options)

    dataset.write_properties(imported_item, options)
    if "label" not in imported_item:
        imported_item["label"] = imported_item["id"]
    json_data[item_type].append(imported_item)  # TODO : What to do if item already exists

    dataset.save(dataset_id, json_data)

def load_item(dataset_id, args, config=None, options=None):
    """Placeholder: load an existing dataset (or one item type) into a database.

    Currently only prints a not-yet-implemented notice.  config/options are
    accepted for signature parity with import_item but are unused; defaults
    were changed from mutable {} to None (same effective behavior).
    """
    item_type = "everything" if len(args) <= 2 else args[2]
    please.say("This function will allow users to move existing datasets into a new database",
               "Sounds like a great feature, no?  Don't worry its on the to-do list",
               "BTW, this would have loaded '%s' from %s into the database"%(item_type, dataset_id) )

if __name__ == "__main__":
    # Command-line entry point: DATASET_ID plus a sub-command.
    parser = OptionParser(usage="%prog DATASET_ID [ settings | list | import | load | create-database | destroy-database ]")
    parser.add_option("-c", "--config", dest="configFile", help="Import Utility Configuration File", default="redi.config")
    parser.add_option("-i", "--id", dest="id", help="ID for main object of operation")
    parser.add_option("-f", "--targetFile", dest="targetFile", help="Target file for imports")
    parser.add_option("-H", "--headerFile", dest="headerFile", help="Header file for imports")
    parser.add_option("-t", "--dataType", dest="dataType", help="Data type for target of operation")
    parser.add_option("-p", "--properties", dest="properties", help="Arbitrary properties to attach to a dataset, or item (e.g. -p label='Short':description='Long')")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Prints debugging statements")

    (options, args) = parser.parse_args()

    # Need at least DATASET_ID and a command.
    if len(args) < 2:
        parser.print_help()
        exit(-1)

    # Strip slashes so the id is safe to use as a directory name below.
    dataset_id = args[0]
    if dataset_id is not None:
        dataset_id = dataset_id.replace("/", "")
    commandArg = args[1]

    # Commands that need no configuration file.
    if commandArg == "settings":
        settings(dataset_id, options)
        exit(0)

    if commandArg == "list":
        dataset.list_items(dataset_id)
        exit(0)

    # Remaining commands require database configuration.
    config = ConfigParser.RawConfigParser()
    config.read(options.configFile)

    if commandArg == "create-database":
        sqldb.create(dataset_id, config)
        neo4j.create(dataset_id, config)
        exit(0)

    if commandArg == "destroy-database":
        sqldb.drop(dataset_id, config)
        neo4j.drop(dataset_id, config)
        exit(0)

    if commandArg == "import":
        import_item(dataset_id, args, config, options)

    elif commandArg == "load":
        load_item(dataset_id, args, config, options)

    else:
        parser.print_help()
