#
# Do not alter this file, create a copy with 
# a different name and modify that. Keep the extension .py
# otherwise it cannot be imported.
#

#
# Data processing configuration module set up to 
# process the demo data included in the distribution.
#

# 
# In this file capitalized attributes must be present (GeneTrack
# will check for their presence).
#
# All attributes may be written out as text but we use
# simple python functions to be able to generate them.
# This makes changing the attributes (such as labels) 
# a lot less error prone.
#

#
# Pipeline stage toggles: each flag switches one of the
# execution steps (load, fit, predict, export) on or off.
#
LOADER_ENABLED = True
FITTER_ENABLED = True
PREDICTOR_ENABLED = True
EXPORTER_ENABLED = True

#
# We import atlas to be able to access relative directories
# so that we can find the demo data.
#
import atlas

#
# Directory setup: `home` must be a string holding the full path to a
# directory. By default it is the distribution home directory taken
# from the atlas environment (presumably an absolute path without a
# trailing slash -- verify against atlas.ENV.HOME_DIR).
#
home = atlas.ENV.HOME_DIR 

#
# Setting up various database locators.
# Note the parameter substitution that constructs the strings
# i.e. this will generate strings such as:
#
# HDF_DATABASE = "/home/users/jdoe/db/yeast-data.hdf"
# SQL_URI = "sqlite:///home/users/jdoe/db/yeast-data.sqlite"
#
# to access other types of databases you need to have the python
# database driver installed and you need to change the SQL_URI accordingly:
# for example to access a PostgreSQL database the SQL_URI will be
#
# SQL_URI = "postgres://scott:tiger@localhost:5432/mydatabase"
#
# see the SQL Alchemy documentation for more details on the supported
# database engines at http://www.sqlalchemy.org/docs/04/dbengine.html
#

organism = "yeast"

# Database locations derived from the home directory and the organism
# name, e.g. <home>/db/yeast-data.hdf for the HDF store and an SQLite
# URI of the form sqlite:///<home>/db/yeast-data.sqlite for SQL.
db_base = home + "/db/" + organism
HDF_DATABASE = db_base + "-data.hdf"
SQL_URI = "sqlite:///" + db_base + "-data.sqlite"

# When True existing labels may be overwritten. When False data is
# protected from accidental overwrites, but the same analysis cannot
# be rerun against the same database.
CLOBBER = True

# The data loader reads its input from this file.
DATA_FILE = home + "/data/H2AZ-data.txt"

#
# Length of the in-memory data vector. Larger values use more memory;
# there is no benefit in exceeding the largest chromosome size.
# Guideline: 1 million (yeast), 10 million (drosophila), 30 million (human).
#
DATA_SIZE = 1000 * 1000

#
# Peaks below this height are not stored, which reduces the number of
# data points and speeds up execution. Fitting is also affected: the
# fit does not extend below this base value, which for large genomes
# can speed up execution substantially.
#
MINIMUM_PEAK_SIZE = 0.1

#
# Fitting tolerance: the distance in bp over which the fitting curve
# falls to one half. Together with EXCLUSION_ZONE this is the most
# important fitting parameter, as it may substantially alter the
# prediction results. Noisier data needs larger values and must be
# experimented with; the plots in Documentation -> Quick Overview give
# a good visual example of what SIGMA does.
#
SIGMA = 20

#
# Distance over which the fitting function is computed. For normal
# fitting a few SIGMA is sufficient; most of the time this may be
# left as is.
#
WIDTH = 5 * SIGMA

#
# Labels: raw and fitted data go into the HDF store under these
# names, while peak labels go into the SQL database.
#
DATA_LABEL = "H2AZ"
FIT_LABEL = DATA_LABEL + "-SIGMA-" + str(SIGMA)
PEAK_LABEL = "PRED-" + FIT_LABEL

#
# Peak detection parameters: the exclusion zone is the region over
# which only one peak may be present.
#
EXCLUSION_ZONE = 147

#
# The expected position of the feature relative to the peak that is
# detected. For nucleosome detection we extend equally in both
# directions.
#
# NOTE: floor division (//) keeps the shifts as whole base pairs under
# Python 3 as well; the original "/" on ints already floored under
# Python 2, so the values there are unchanged (147 // 2 == 73).
#
LEFT_SHIFT  = EXCLUSION_ZONE // 2
RIGHT_SHIFT = EXCLUSION_ZONE // 2

#
# Data export settings.
#
# Exporting can also be done directly from the database with simple
# select queries; this feature exists for convenience. Files placed
# into the /html/static/download folder become visible and
# downloadable through the webserver.
#
EXPORT_LABELS = [PEAK_LABEL]
EXPORT_DIR = "%s/html/static/download" % home

#
# Functions specification, by overriding these one
# may completely change what the server does
#

# Specifies the data file format parser.
from mod454.loader import loader as LOADER

# Specifies the data fitter.
from mod454.fitter import fitter as FITTER

# Specifies the data predictor.
from mod454.predictor import predictor as PREDICTOR

# Specifies the data exporter.
from atlas.commands import exporter as EXPORTER

#
# Convenience step: autoload files with the ORF features. This runs at
# import time, so it is a module-level side effect triggered whenever
# this configuration is imported. The file must be tab separated; see
# the home/data/yeast-features.txt file for the required columns.
# With clobber=False and drop=False existing features are left in
# place (presumably load_features skips or rejects duplicates --
# verify against atlas.sql.load_features).
#
from atlas import sql
feature_file =  home + "/data/yeast-features.txt"
sql.load_features( SQL_URI, feature_file, clobber=False, drop=False )

