"""Navala - database module.

TODO: check for duplicate names in stock, stockset, agent tables.

"""
import os
from tables import *
import Pyro4
import cPickle as pkl
import infogetter
import datetime
from threading import RLock
import sys
import numpy as np
import numpy.lib.recfunctions as rf
import dateutil.relativedelta as rd
import re

from misc import misc
from misc import timedata
import time
import threads
import logging
import navalasystem as na
import misc.quoteperiod as qp
from misc import constants

FLAG_PROFILE = True # Whether to record times (profiling switch; see the
                    # commented tic/toc code in Database.td_GetData2)

###############################################################################
## TABLE DEFINITIONS

class Stock(IsDescription):
    """Definition of the "stock" table.

    Expected composition of the "name" field:

        supermarket_market_symbol
        \----------+-----------/
                   |
                  name

    "name" is also used to name the per-stock Quote tables.

    TODO: for some reason, Sphynx is skipping all IsDescription classes
    """
    id = Int64Col(pos=0)
    supermarket = StringCol(32, pos=1) # Example: "BOVESPA"
    market = StringCol(32, pos=2) # Example: "VISTA"
    symbol = StringCol(32, pos=3) # Local symbol, such as "PETR4"
    name = StringCol(96, pos=4) # Example: "BOVESPA.VISTA.PETR4".
                                 # Redundant, yes, but this simplifies logic
                                 # and besides, PyTables only allows
                                 # single-column indexes. Perhaps the best
                                 # justification is that this field is used as
                                 # the name for the corresponding Quote tables.
    description = StringCol(128, pos=5) # e.g. "PETROBRAS"
    sanity = FloatCol(pos=6) # Sanity score -- TODO confirm exact semantics

class Quote(IsDescription):
    """
    Description of a Quotes table (one table per stock).

    This table does not have the idstock field because its name will be
    the idstock itself (see Stock.name).
    """
    
    timestamp = Int64Col(pos=0) # Quote time as a timestamp (see misc.dt2ts)
    open = FloatCol(dflt=0, pos=1)
    close = FloatCol(dflt=0, pos=2)
    high = FloatCol(dflt=0, pos=3)
    low = FloatCol(dflt=0, pos=4)
    volume = FloatCol(dflt=0, pos=5)
    numTrades = FloatCol(dflt=0, pos=6) # Number of trades in the interval
    status = IntCol(pos=7) # timedata.QuoteStatus: regular/auction/aftermarket
    flagRealtime = BoolCol(pos=8) # True for realtime rows, False for history

class GenericTimeData(IsDescription):
    """Generic (timestamp, value) series -- presumably used for indicator
    tables (see Database.td_Record); confirm against _s_dbDescr users."""
    timestamp = Int64Col(pos=0)
    y = FloatCol(dflt=0, pos=1) # Series value at "timestamp"

class StockSet(IsDescription):
    """Definition of the "stockset" table: a named collection of stocks."""
    id = Int64Col(pos=0)
    name = StringCol(64, pos=1)
    description = StringCol(255, pos=2)
    
class StockSetItem(IsDescription):
    """One stock's membership in a stockset (many-to-many link table)."""
    id = Int64Col(pos=0)
    idstockset = Int64Col(pos=1) # References StockSet.id
    idstock = Int64Col(pos=2) # References Stock.id


class Agent(IsDescription):
    """
    Definition for the agent table.
    
    Fields:
    id
    name -- maximum 64 characters
    data -- serialized agent state (see SerializeAgentData());
            maximum 16192*8 characters
    """
    id = Int64Col(pos=0)
    name = StringCol(64, pos=1)
    data = StringCol(16192*8, pos=2)


###############################################################################
## DATABASE
class Database(object):
    """Database class wrapping the PyTables (H5F) file.
    
    All methods share the same reentrant lock (R) to make sure that only one
    thread is either reading or writing data at a time. I hope this doesn't
    become a bottleneck.
    
    Naming convention:
    * regular names: reentrant, with the @misc.CatchException decorator
    _*: reentrant only but without that decorator, for internal use only
    __: NOT reentrant, STRICTLY for internal use

    """
    # NOTE: CLASS-level attributes, shared by every Database instance.
    H = -1 # PyTables File handle; -1 means the file was never opened
    filename = "" # Name of H5F file
    R = RLock() # Single lock serializing all database operations
    
    ###########################################################################
    ## General-purpose / misc
    
    def __init__(self):
        """Creates the logger; file opening is deferred to AssertOpen()."""
        self.logger = misc.GetLogger(name="database")
    
    def CreateIfDoesNotExist(self):
        """Creates the database file unless it already exists on disk."""
        with self.R:
            if os.path.isfile(self.filename):
                return
            self.logger.info("Creating database...")
            self.__CreateDatabase(False)

    def AssertOpen(self):
        """Opens the H5F file if it has never been opened or was closed."""
        with self.R:
            # Short-circuit: never touches .isopen while H is still -1
            needsOpen = self.H == -1 or not self.H.isopen
            if needsOpen:
                self.__Open()
            
    def GetNode(self, sNode):
        """Gets a node by path name; non-string arguments pass through."""
        with self.R:
            self.AssertOpen()
            node = self.__GetNode(sNode)
            return node

    def Read(self, sNode):
        """Reads every row of the table into a numpy.ndarray."""
        with self.R:
            self.AssertOpen()
            return self.__GetNode(sNode).read()

    def ReadWhere(self, sNode, sWhere):
        """
        Returns the rows satisfying sWhere as a numpy.ndarray object.
        
        Arguments:
            sNode -- either a table node (as returned by GetNode()) or a
                     string containing the node path in the database
            sWhere -- condition string for Table.readWhere()
        """
        with self.R:
            self.AssertOpen()
            return self.__GetNode(sNode).readWhere(sWhere)
        
    def FirstRow(self, sNode, sWhere, flagErrorIfNotFound=0):
        """
        Returns the first row satisfying the condition, or None.
        
        Arguments:
            sNode -- either a table node (as returned by GetNode()) or a
                     string containing the node path in the database
            sWhere -- condition
            flagErrorIfNotFound=0 -- whether to raise when nothing matches
        """
        with self.R:
            self.AssertOpen()
            node = self.__GetNode(sNode)
            return self.__FirstRow(node, sWhere, flagErrorIfNotFound)
                                   
    def AnyRow(self, sNode, sWhere):
        """Returns whether at least one row satisfies the condition.
        
        Arguments:
            sNode -- either a table node (as returned by GetNode()) or a
                     string containing the node path in the database
            sWhere -- condition
        """
        with self.R:
            self.AssertOpen()
            row = self.__FirstRow(self.__GetNode(sNode), sWhere, False)
            return row is not None
        

    def __CreateDatabase(self, flagOverwrite=False):
        """
        Creates or re-creates database with overwrite option.

        Builds one group per quote period (see misc.quoteperiod) plus the
        "/tmp" group, then creates the fixed tables (stock, stockset,
        stocksetitem, agent).

        The database is closed after creation.

        Arguments:
          flagOverwrite=False -- whether to allow for overwritting of existing
                                 database

        Raises DatabaseExists if the file exists and flagOverwrite is False.
        """
        if not flagOverwrite and os.path.isfile(self.filename):
            raise DatabaseExists("Existing database: %s" % self.filename)
        
        # Creates file and directories for different time periods for the
        # quotes tables
        h5f = openFile(self.filename, mode = 'w', title = 'Navala database')
        for row in qp.data: # One group for each quote period
            h5f.createGroup('/', row['varname'], '%s quotes' % row['english'])
        h5f.createGroup("/", "tmp")
        h5f.close() 
        
        # Reopens through the regular channel and creates the fixed tables
        self.AssertOpen()
        self.__CreateStock()
        self.__CreateStockset()
        self.__CreateStocksetitem()
        self.__CreateAgent()
        
        # BUGFIX: this used to call h5f.close() again on the handle already
        # closed above; what must be closed here is the handle opened by
        # AssertOpen(), so the database really ends up closed.
        self.H.close()

    def __CreateStock(self):
        """Creates the "stock" table."""
        # NOTE: column indexes (id, name) are not created here; the
        # createIndex() calls were left disabled in the original code.
        self.H.createTable('/', 'stock', Stock)

    def __CreateStockset(self):
        """Creates the "stockset" table."""
        # NOTE: the id column index is not created here (left disabled).
        self.H.createTable('/', 'stockset', StockSet)
        
    def __CreateStocksetitem(self):
        """Creates the "stocksetitem" table."""
        # NOTE: indexes (id, idstockset, idstock) are not created here
        # (left disabled).
        self.H.createTable('/', 'stocksetitem', StockSetItem)
    
    def __CreateAgent(self):
        """Creates the "agent" table."""
        # NOTE: the name column index is not created here (left disabled).
        self.H.createTable('/', 'agent', Agent)
        
    def __Open(self):
        """Opens the H5F file in read/write mode; stores and returns the handle."""
        self.H = openFile(self.filename, mode = 'r+')
        return self.H    
        
    def __GetNode(self, sNode):
        """Low-level GetNode(): resolves path strings, passes nodes through."""
        if isinstance(sNode, str):
            return self.H.getNode(sNode)
        return sNode

    def __FirstRow(self, node, sWhere, flagErrorIfNotFound=0):
        """Low-level equivalent of FirstRow()
        
        Arguments:
            node -- table node
            sWhere -- condition string for Table.where()
            flagErrorIfNotFound=0 -- whether to raise KeyNotFound on no match
            
        If there is no match, returns None.
        """
        row = None
        for row_ in node.where(sWhere):
            row = row_
            break
        # BUGFIX: "not row" relied on the truthiness of a PyTables Row
        # object, which is not a reliable "no match" test; the contract is
        # None-on-no-match, so test identity explicitly.
        if row is None and flagErrorIfNotFound:
            raise KeyNotFound("Table \"%s\": not found (%s)" % 
                             (node.name, sWhere))
        return row

    def __GetNewId(self, node):
        """Returns (last value of the "id" column)+1, or 1 for an empty table."""
        if len(node) == 0:
            return 1
        return node.col("id")[-1] + 1
            

    def __Delete(self, node, sWhere):
        """Deletes the FIRST row satisfying the condition (if any).

        Note: despite the generic name, at most ONE row is removed per call;
        callers needing multi-row deletion loop over this (see
        __stocksetitem_DeleteMany()).
        """
        r = self.__FirstRow(node, sWhere)
        # BUGFIX: identity test instead of truthiness, matching
        # __FirstRow()'s None-on-no-match contract.
        if r is not None:
            node.removeRows(r.nrow)

    def __MakeTuples(self, ll):
        """Converts a list of numpy.void rows into a list of plain tuples.
        
        numpy.void objects were causing pickling problems, so rows are
        tuplified before being sent to callers."""
        return list(map(tuple, ll))

    def __FindTempName(self, name):
        """Finds a free name for a new temporary table within "/tmp".
        
        Tries "<name>" first, then random "<name>-<nnnn>" suffixes.

        Raises RuntimeError after MAXTRIES failed attempts.
        """
        newName = name
        i = 0
        MAXTRIES = 213 # arbitrary number
        while True:
            if i == MAXTRIES:
                raise RuntimeError(("Gave up trying to find a temporary name "+\
                      "after %d tries") % MAXTRIES)
            if not self.H.__contains__("/tmp/"+newName):
                break
            newName = "%s-%04d" % (name, np.random.randint(0, 9999))
            # BUGFIX: the counter was never incremented, so the MAXTRIES
            # guard could never fire (potential infinite loop on repeated
            # name collisions).
            i += 1
        return newName

    def __MoveAndNew(self, node):
        """Moves node to /tmp and creates an empty replacement (no indexes).
        
        Used when rows must be merged (see __td_Record1): the old table is
        parked under /tmp so rows can be copied back selectively.
        
        Returns: (node, newNode), where node represents the moved table,
                 and newNode is the new, empty table."""
        name, path =  node.name, node._v_parent._v_pathname
        tmpName = self.__FindTempName(name)
#        self.logger.debug("Will try to move node %s/%s to /tmp/%s" % (path, name, tmpName))
        node.move(newparent="/tmp", newname=tmpName)
#        self.logger.debug("MOVED node %s to /tmp/%s" % (name, tmpName))
        newNode = self.H.createTable(path, name, node.description) # Will become new table
        node = self.__GetNode("/tmp/"+tmpName) # Re-fetch the moved node by its new path
        self.H.flush()
        return (node, newNode)
    
    def __DeleteAllRows(self, node):
        """Drops the table and re-creates it empty with the same description.
        
        Note 1: the NEW node is returned; the caller's old reference is stale.
        Note 2: indexes are NOT restored."""
        parentPath = node._v_parent._v_pathname
        tableName = node.name
        # Grab the description BEFORE removing the node
        descr = node.description
        node.remove()
        newNode = self.H.createTable(parentPath, tableName, descr)
        self.H.flush()
        return newNode

    ###########################################################################
    ## Table-specific functions

    #### STOCK ####

    @misc.CatchException
    def stock_Insert(self, supermarket, market, symbol, name, description="", 
                     sanity=1):
        """Inserts one row into the "stock" table.

        TODO: check uniqueness of name
        """
        with self.R:
            self.AssertOpen()
            tbl = self.__GetNode('/stock')
            newRow = tbl.row
            newRow["id"] = self.__GetNewId(tbl)
            for field, value in (("supermarket", supermarket),
                                 ("market", market),
                                 ("symbol", symbol),
                                 ("name", name),
                                 ("description", description),
                                 ("sanity", sanity)):
                newRow[field] = value
            newRow.append()
            self.H.flush()

    #### STOCKSET ####
    @misc.CatchException
    def stockset_Insert(self, name, description=""):
        """Inserts one row into the "stockset" table.

        TODO: check uniqueness of name
        """
        with self.R:
            self.AssertOpen()
            tbl = self.__GetNode('/stockset')
            newRow = tbl.row
            newRow["id"] = self.__GetNewId(tbl)
            newRow["name"] = name
            newRow["description"] = description
            newRow.append()
            self.H.flush()

    @misc.CatchException
    def stockset_GetItemsByKey(self, key):
        """
        Returns stockset items whose stockset matches key.
        
        Key can be either the id or the name of a stockset.
        
        Returns: list of tuples according to the database.Stock definition;
        [] when the stockset does not exist or is empty.
        """
        with self.R:
            self.AssertOpen()
            if isinstance(key, str):
                # Key is a stockset name: resolve it to an id first
                node = self.__GetNode("/stockset")
                row = self.__FirstRow(node, "name == \"%s\"" % key)
                if row is None:
                    return [] #raise KeyNotFound("Stockset name not found: \"%s\"" % key)
                id = row[0]
            else:
                id = key
            
            temp = self.ReadWhere("/stocksetitem", "idstockset == %d" % id)
            if len(temp) == 0:
                return []
            a = []
            for row in temp:
                # row[2] is StockSetItem.idstock (see table definition);
                # fetched row by row because PyTables has no SQL-style joins
                temp2 = self.ReadWhere("/stock", \
                    "id == %d" % row[2]) 
                a.append(temp2[0])
            return self.__MakeTuples(a)

    def stockset_GetNamesByKey(self, key):
        """
        Returns only the Stock.name field of each stock in a stockset.
        
        Key can be either the id or the name of a stockset.
        
        Returns [name1, name2, ...]
        """
        items = self.stockset_GetItemsByKey(key)
        # Index 4 is the "name" column of the Stock table definition
        return [item[4] for item in items]




    #### STOCKSETITEM ####
        
    @misc.CatchException
    def stocksetitem_Insert(self, idstockset, idstock):
        """Links one stock to a stockset (no-op if the link already exists)."""
        with self.R:
            self.AssertOpen()
            self.__stocksetitem_InsertMany(idstockset, [idstock])
            
    @misc.CatchException
    def stocksetitem_InsertMany(self, idstockset, idstocks):
        """Links many stocks to a stockset; existing links are skipped."""
        with self.R:
            self.AssertOpen()
            self.__stocksetitem_InsertMany(idstockset, idstocks)

    def __stocksetitem_InsertMany(self, idstockset, ids_stocks):
        """Inserts many items at once. Foolproof, inserts only non-existing.

        Arguments:
            idstockset -- id of the stockset
            ids_stocks -- iterable of stock ids to link to the stockset
        """
        node = self.__GetNode("/stocksetitem")
        # Gets the stocks already in the stockset, to skip duplicates
        existing = node.readWhere("idstockset == %d" % idstockset)
        existing = existing["idstock"]
        
        # Makes list of (id, idstockset, idstock) tuples to insert
        id0 = self.__GetNewId(node)
        data = []
        for x in ids_stocks:
            if x not in existing:
                data.append((id0, idstockset, x))
                # BUGFIX: this used to increment the loop variable "x"
                # instead of the id counter, giving every new row the
                # same id.
                id0 += 1
        
        # Appends
        if len(data) > 0:
            node.append(data)
            self.H.flush()

     
    @misc.CatchException
    def stocksetitem_Delete(self, **kwargs):
        """Deletes row from stocksetitem table.
        
        KEYWORD arguments:
            id -- stocksetitem id
          OR
            idstockset
            idstock
        """
        with self.R:
            self.AssertOpen()
            node = self.__GetNode("/stocksetitem")
            # "in" instead of the deprecated, Python-2-only dict.has_key()
            if "id" in kwargs:
                self.__Delete(node, "id == %d" % kwargs["id"])
            else:
                self.__Delete(node, 
                    "(idstockset == %d) & (idstock == %d)" %
                    (kwargs["idstockset"], kwargs["idstock"]))
            self.H.flush()
            
    def stocksetitem_DeleteMany(self, idstockset, ids_stocks):
        """Foolproof deletion of many rows in stocksetitem table.
        
        This function actually calls __stocksetitem_DeleteMany()"""
        with self.R:
            self.AssertOpen()
            # BUGFIX: this call used to sit OUTSIDE the "with" block, so the
            # deletion ran without holding the database lock.
            self.__stocksetitem_DeleteMany(idstockset, ids_stocks)
        

    def __stocksetitem_DeleteMany(self, idstockset, ids_stocks):
        """Foolproof deletion of many rows in stocksetitem table.

        Stock ids not present in the stockset are silently skipped."""
        node = self.__GetNode("/stocksetitem")
        # Stock ids currently linked to this stockset
        present = node.readWhere("idstockset == %d" % idstockset)["idstock"]
        
        for idstock in ids_stocks:
            if idstock not in present:
                continue
            self.__Delete(node, 
                "(idstockset == %d) & (idstock == %d)" %
                (idstockset, idstock))


    #### AGENT ####
        
    @misc.CatchException
    def agent_Insert(self, name, data):
        """Inserts one row into the "agent" table; data is serialized first."""
        with self.R:
            self.AssertOpen()
            tbl = self.__GetNode('/agent')
            newRow = tbl.row
            newRow["id"] = self.__GetNewId(tbl)
            newRow["name"] = name
            newRow["data"] = SerializeAgentData(data)
            newRow.append()
            self.H.flush()
        
    @misc.CatchException
    def agent_Update(self, id=None, name=None, data=None):
        """
        Updates row in agent table. The key can be either id or name.
        
        If id is passed, the name and/or data can be changed.
        
        If name is passed, but not id, then:
           - If the name exists, data is updated;
           - If the name does not exist, a new agent is inserted.
        
        Raises InvalidArgument when neither id nor name is given.
        """
        with self.R:
            if id is None and name is None:
                raise InvalidArgument("Either id or name must be specified!")
            
            self.AssertOpen()
            if id is not None:
                # Keyed by id: a missing id is an error (flag=True)
                row = self.FirstRow("/agent", "id == %d" % id, True)
                if name is not None:
                    # TODO: check duplicates!!!
                    row["name"] = name
                if data is not None:
                    row["data"] = SerializeAgentData(data)
                row.update()
            else:
                row = self.FirstRow("/agent", "name == \"%s\"" % name, False)
                if row:
                    if data is not None:
                        row["data"] = SerializeAgentData(data)
                        # NOTE(review): modifyRows() is used here instead of
                        # row.update() (commented below) -- presumably a
                        # workaround for updating a Row obtained outside a
                        # live where() iteration; confirm before changing.
                        row.table.modifyRows(row.nrow, rows=[[row.fetch_all_fields()]])
                        #row.update()
                else:
                    # Name not found: fall back to inserting a new agent
                    self.agent_Insert(name, data)
            self.H.flush()

    @misc.CatchException
    def agent_GetData(self, id=None, name=None):
        """
        Returns unserialized agent data. The key can be either id or name.
        
        id has precedence over name.
        
        The result is a dictionary. If agent data does not exist, returns {}
        """
        with self.R:
            self.AssertOpen()
            if id is None and name is None:
                raise InvalidArgument("Either id or name must be specified!")
            
            node = self.__GetNode("/agent")
            if id is not None:
                sWhere = "id == %d" % id
            else:
                sWhere = "name == \"%s\"" % name
            row = self.__FirstRow(node, sWhere, False)
            if row is None:
                return {}
            return UnserializeAgentData(row[2]) # column 2 is "data"

    ###########################################################################
    #### QUOTE/Time Data ####
    
    def __td_GetNode(self, sName, Q=None, flagCreate=False):
        """Gets (and optionally creates) the table node at path sName.
        
        CAUTION: creation currently closes and reopens the database, which
                 invalidates ALL previously obtained node references!
        
        Arguments:
            sName -- full node path (e.g. "/daily/quote__<stockname>")
            Q -- (Optional) timedata.TimeData object. Must be passed (and only
                 used) if flagCreate is True; its _s_dbDescr attribute names
                 the IsDescription class for the new table
            flagCreate -- whether to create node if it does not exist

        Returns the node, or None when it does not exist and flagCreate is
        False.
        """

        if flagCreate and misc.isempty(Q):
            raise RuntimeError("Q must be passed if flagCreate is True")
        r = None
        flagHas = self.H.__contains__(sName)
        if not flagHas and flagCreate:
            # Creates every intermediate group along the path, then the table
            nn = sName.split("/")[1:]
            p = "/"
            for i in range(0, len(nn)-1):
                if not self.H.__contains__(p+"/"+nn[i]):
                    self.H.createGroup(p, nn[i])
                p += ("/" if i > 0 else "")+nn[i]
            # eval() resolves the description class named by Q._s_dbDescr
            # (module-internal, trusted value)
            tbl = self.H.createTable(p, nn[-1], eval(Q._s_dbDescr))
            self.H.flush()
            self.H.close()
            self.AssertOpen()
            flagHas = True
        if flagHas:
            r = self.H.getNode(sName)
        return r

    @misc.CatchException
    def quote_GetMaxHistTimestamp(self, period, name):
# TODO Only Status will be checked because flagRealtime will no longer exist.
        """Returns the newest historical quote timestamp for a stock, or None.

        Arguments:
            period -- quoteperiod value (qp.daily etc.)
            name -- stock name

        Daily-or-longer periods use the single table's last row (those
        tables only hold history). Intraday periods scan the chunk tables
        newest-first until a historical row is found; see
        __quote_GetMaxHistTimestamp() for what counts as historical.
        """
        with self.R:
            self.AssertOpen()
            if period >= qp.daily:
                sNode = d_GetPath("quote", period, name)
                if self.H.__contains__(sNode):
                    node = self.__GetNode(sNode)
                    # >= daily only has history
                    return self.__quote_GetMaxTimestamp(node)
            else:
                # Full intraday range, newest chunk first
                mm = i_ReadingMap(period, misc.dt2ts(constants.IntradayMinDatetime),
                                     misc.dt2ts(datetime.datetime.today()))
                for m in mm[-1::-1]: # Backwards looking for existing table
                    sNode = i_GetPath(m["groupName"], "quote", period, name)
                    if self.H.__contains__(sNode):
                        node = self.__GetNode(sNode)
                        ts = self.__quote_GetMaxHistTimestamp(node)
                        if ts is not None:
                            return ts
            return None

    def __quote_GetMaxTimestamp(self, node):
        """Returns the last recorded timestamp (historical or realtime).
       
        Returns None if the table is empty."""
        numRows = len(node)
        if numRows == 0:
            return None
        return node[numRows-1]["timestamp"]
            
    def __quote_GetMinTimestamp(self, node):
        """Returns the first recorded timestamp (historical or realtime).
       
        Returns None if the table is empty."""
        if len(node) == 0:
            return None
        return node[0]["timestamp"]

    def __quote_GetMaxHistTimestamp(self, node):
        """Returns the newest recorded HISTORICAL timestamp.
       
        Walks backwards until it finds a row with flagRealtime unset;
        non-regular (auction/aftermarket) rows are skipped unless they are
        from today.
        
        Returns None if the table is empty or only realtime rows remain.
        """
        today = misc.dt2ts(datetime.date.today())
        ptr = len(node) - 1
        while ptr >= 0:
            row = node[ptr]
            flagHist = not row["flagRealtime"]
            flagUsable = (row["status"] == timedata.QuoteStatus.Regular
                          or row["timestamp"] >= today)
            if flagHist and flagUsable:
                return row["timestamp"]
            ptr -= 1
        return None

    @misc.CatchException
    def quote_Purge(self, **kwargs):
        """Deletes quotes tables from a given stock. Returns # deleted nodes
        
        Keyword arguments:
            period -- (Optional) quoteperiod.daily etc. If it is not passed,
                will get rid of all quote tables from the given stock name
            name -- stock name. NOT OPTIONAL.
        """
        with self.R:
            self.AssertOpen()
            prefix = "quote__" + kwargs["name"]
            period = kwargs.get("period", None)
            if period is None:
                periods = range(0, qp.yearly+1) # every known period
            else:
                periods = [period]
            n = 0
            for p in periods:
                n += self.__td_Purge(p, prefix)
            return n

    @misc.CatchException
    def td_Purge(self, indicator):
        """Deletes all nodes related to an indicator. Returns # deleted nodes"""
        with self.R:
            self.AssertOpen()
            n = self.__td_Purge(indicator.GetPeriod(),
                                misc.GoodVarname(indicator.name))
        return n

    def __td_Purge(self, period, prefix):
        """Deletes all nodes under the period's group matching prefix.
        
        TODO: I can later use this to delete quotes tables, but will have to refactory"""
        sGroup = "/"+qp.data[period]["varname"]
        if period >= qp.daily:
            # Daily and longer: tables live directly in the period group
            return self.__td_Purge2(self.H.getNode(sGroup), prefix)
        # Intraday: one sub-group per time chunk
        n = 0
        for childName in self.H.getNode(sGroup)._v_children.keys():
            childNode = self.H.getNode(sGroup+"/"+childName)
            n += self.__td_Purge2(childNode, prefix)
        return n

    def __td_Purge2(self, node, prefix):
        """Deletes all children of node whose names start with prefix.

        Returns the number of removed nodes."""
        sPath = node._v_pathname
        self.logger.debug("Analysing children of node ``%s``" % (sPath))
        n = 0
        # list(): snapshot the names, since removeNode() mutates _v_children
        for name in list(node._v_children.keys()):
            # BUGFIX: used re.match("^"+prefix, name), which misbehaves when
            # the prefix contains regex metacharacters; the docstring's
            # intent is a literal prefix test.
            if name.startswith(prefix):
                sFullPath = sPath+"/"+name
                self.logger.info("Removing node ``%s``" % sFullPath)
                self.H.removeNode(sFullPath)
                n += 1
        return n

    @misc.CatchException
    def td_GetData(self, indicator, name, dt1, dt2):
        """See td_GetData2().

        NOTE(review): this definition is immediately shadowed by the second,
        behaviorally identical td_GetData() defined right below it -- this
        one is dead code and a candidate for removal.
        """
        return self.td_GetData2(indicator._TDClass,
            misc.GoodVarname(indicator.name), indicator.period, name, dt1, dt2)

    @misc.CatchException
    def td_GetData(self, indicator, name, dt1, dt2):
        """Fetches an indicator's TimeData for one stock; see td_GetData2().

        Returns timedata.TimeData"""
        return self.td_GetData2(indicator._TDClass,
            misc.GoodVarname(indicator.name), indicator.period, name, dt1, dt2)

    @misc.CatchException
    def quote_GetData(self, period, name, dt1, dt2):
        """Returns timedata.Quotes for one stock in the [dt1, dt2] range.
        
        Arguments:
          period -- value from within quoteperiod module
          name -- stock name
          dt1, dt2 -- datetime.datetime or timestamp values
        
        Returns: timedata.Quotes
        """
        return self.td_GetData2(timedata.Quotes, "quote", period, name,
                                dt1, dt2)
    
    def td_GetData2(self, TDClass, prefix, period, name, dt1, dt2, flagSlice=True):
        """Generic routine to get a TimeData object.

        Same as td_GetData() but with different set of parameters.

        Arguments:
            TDClass -- a timedata.TimeData descendant class object
            prefix -- node prefix, such as "quote", or an Indicator name
            period -- qp.daily etc
            name -- stock name
            dt1, dt2 -- datetime.datetime or timestamp values
            flagSlice=True -- Whether to slice to return TimeData within
                              dt1, dt2 range. If False, will return the chunk
                              concatenation.
            
        Returns: timedata.TimeData
        """
        with self.R:
            self.AssertOpen()
            ts1, ts2 = misc.dt2ts(dt1), misc.dt2ts(dt2)
            if period >= qp.daily:
                # Daily or longer: the whole history lives in a single table
                Q = TDClass()
                node = self.__td_GetNode(d_GetPath(prefix, period, name),
                    Q, False)
                if node is not None:
                    Q.SetData(node.read())
            else:
                # Intraday: data is chunked across per-interval tables
                if period == qp.secondly:
                    mm = secondly_ReadingMap(ts1, ts2)
                else:
                    mm = i_ReadingMap(period, ts1, ts2)
                a = []
                # BUGFIX: Q was unbound when the reading map came back empty,
                # raising NameError at the Concatenate() call below.
                Q = TDClass()
                for m in mm:
                    Q = TDClass()
                    node = self.__td_GetNode(
                        i_GetPath(m["groupName"], prefix, period, name),
                            Q, False)
                    if node is not None:
                        Q.SetData(node.read())
                        a.append(Q)
                # TODO: profile this concatenation X single recArray concatenation
                Q = Q.Concatenate(a)
            return Q.GetTimestampSlice(ts1, ts2) if flagSlice else Q

    @misc.CatchException
    def td_Record(self, indicator, name, Q, flagRSAN=False):
        """Generic TimeData recorder.
        
        TODO: atm I haven't developed the indicators yet, so I don't know exactly the class of the indicator argument
        
        Arguments:
            indicator -- ?Indicator object that generated Q; its name and
                GetPeriod() determine where the data is stored
            name -- stock name
            Q -- timedata.TimeData object
            flagRSAN=False -- "Record Secondly As Normal". If set, secondly
                data will be recorded using the "normal" routine, which is
                __td_Record1, instead of the default __secondly_Record.
        """
        prefix = misc.GoodVarname(indicator.name)
        period = indicator.GetPeriod()
        return self.__td_Record(prefix, period, name, Q, flagRSAN)
    
    @misc.CatchException
    def quote_Record(self, period, name, Q, flagRSAN=False):
        """Records quote data; see __td_Record() for argument semantics."""
        return self.__td_Record("quote", period, name, Q, flagRSAN)

    def __td_Record(self, prefix, period, name, Q, flagRSAN):
        """Recording with chunk management.

        Dispatches on period: secondly data goes through __secondly_Record()
        (unless flagRSAN is set), other intraday periods are split into
        per-chunk tables via i_WritingMap(), and daily-or-longer data goes
        into a single table. No-op when Q is empty.
        """
        if len(Q) == 0:
            return
        with self.R:
            self.AssertOpen()
            if period == qp.secondly:
                if not flagRSAN:
                    self.__secondly_Record(prefix, name, Q)
                else:
                    # flagRSAN: record secondly data via the normal routine
                    rm = secondly_ReadingMap(Q.timestamp[0], Q.timestamp[-1])
                    wm = WritingMap(rm, Q.timestamp)
                    for m in wm:
                        node = self.__td_GetNode(
                            i_GetPath(m["groupName"], prefix, period, name),
                            Q, True)
                        self.__td_Record1(node, Q[m["idx1"]:m["idx2"]])
            elif period < qp.daily:
                # Other intraday periods: one table per time chunk
                mm = i_WritingMap(period, Q.timestamp)
                for m in mm:
                    node = self.__td_GetNode(
                        i_GetPath(m["groupName"], prefix, period, name),
                        Q, True)
                    self.__td_Record1(node, Q[m["idx1"]:m["idx2"]])
            else:
                # Daily or longer: the whole history lives in one table
                node = self.__td_GetNode(d_GetPath(prefix, period, name), 
                    Q, True)
                self.__td_Record1(node, Q)
            self.logger.debug("$$$ %s: recorded %d quotes" % (name, len(Q)))
#            for (i, ts) in enumerate(Q.timestamp):
#                self.logger.debug("$$$ datetime[%d] = %s" % (i, misc.dt2str(misc.ts2dt(ts))))

    def __td_Record1(self, node, Q):
        """Appends/replaces/inserts records in a Quote table
        
        Arguments:
            node -- Table object
            Q -- timedata.Quotes object
        
        Attempts to replace existing times. Three cases depending on how
        the new interval [tsMin, tsMax] relates to the existing table
        interval [exMin, exMax]:
          1. new data strictly after existing -> plain append;
          2. new data covers all existing -> wipe the table, then append;
          3. partial overlap -> rebuild: copy the existing head/tail the
             new data does not cover plus the new rows into a fresh table,
             then drop the old one (PyTables cannot replace a row range
             in place).
        """
        if Q is None or len(Q) == 0:
            return
        X = Q.Pack()  # packed, appendable representation of Q
        exMin = self.__quote_GetMinTimestamp(node)
        exMax = self.__quote_GetMaxTimestamp(node)
        tsMin = Q.timestamp[0]  # assumes Q sorted ascending by timestamp
        tsMax = Q.timestamp[-1]
        try:
            if exMax is None or tsMin > exMax:
                #  |-Existing-|
                #               |-New-|
###                self.logger.debug("EITCHAAAAAAAAAAAAAAAAAAAAAAAAAAAAA % d %s" % (len(Q), Q.__class__.__name__))
#                self.logger.debug(X)
                node.append(X)
            elif tsMin <= exMin and tsMax >= exMax:
                #    |-Existing-|
                #  |----New-------|
                node = self.__DeleteAllRows(node)
                node.append(X)
            else:
                # temporary table required (partial overlap)
                flagMoved = False
                try:
                    self.logger.debug("RRR %s: new min is %d and existing max is %d" % (node.name, tsMin, exMax))
                    self.logger.debug("RRR %s: new min is %s and existing max is %s" % (node.name, misc.ts2str(tsMin), misc.ts2str(exMax)))
                    # node becomes the renamed old table; newNode is a fresh
                    # empty table created under the original name
                    (node, newNode) = self.__MoveAndNew(node)
                    flagMoved = True
                    if exMin < tsMin:
                        # |-Existing-...
                        #      |-New-...
                        node.whereAppend(newNode, "timestamp < %f" % tsMin)
                        self.logger.debug("RRR Copied everything before %s" % misc.ts2str(tsMin))
                    newNode.append(X)
                    self.logger.debug("RRR Inserted %d rows between %s and %s" % (len(Q), misc.ts2str(Q.timestamp[0]), misc.ts2str(Q.timestamp[-1])))
                    if exMax > tsMax:
                        # ...-Existing-|
                        # ...-New-|
                        node.whereAppend(newNode, "timestamp > %f" % tsMax)
                        self.logger.debug("RRRR Copied everything after %s" % misc.dt2str(misc.ts2dt(tsMax)))
                finally:
                    if flagMoved:
                        # drop the renamed old table
                        node.remove()
        finally:
            self.H.flush()


    ## Data interval deletion
    ## td_DeleteInterval     =>
    ## quote_DeleteInterval  => __td_DeleteInterval => __td_DeleteInterval1

    @misc.CatchException
    def td_DeleteInterval(self, indicator, name, dt1, dt2, **kwargs):
        """Deletes indicator records satisfying dt1 <= timestamp <= dt2

        Arguments:
            indicator -- indicator object that generated the data; its name
                         and period select which tables are swept
            name -- (Optional) e.g. BOVESPA_VISTA_PETR4. "Not passing"
                    means passing as None or []; then sweeps all tables
            dt1, dt2 -- timestamp or datetime accepted.

        Keyword arguments:
            flagSimulation=False -- if specified and True, does not delete

        If isempty(name), sweeps all tables.
        """
        prefix = misc.GoodVarname(indicator.name)
        period = indicator.GetPeriod()
        return self.__td_DeleteInterval(prefix, period, name, dt1, dt2,
                                        **kwargs)

    @misc.CatchException
    def quote_DeleteInterval(self, period, name, dt1, dt2, **kwargs):
        """Deletes quote records satisfying dt1 <= timestamp <= dt2

        Arguments:
            period -- (Optional) qp.daily etc; if empty, sweeps all periods
            name -- (Optional) e.g. BOVESPA_VISTA_PETR4; if empty or not
                    passed, sweeps all tables
            dt1, dt2 -- timestamp or datetime accepted.

        Keyword arguments:
            flagSimulation=False
        """
        return self.__td_DeleteInterval("quote", period, name, dt1, dt2,
                                        **kwargs)


    def __td_DeleteInterval(self, prefix, period, name, dt1, dt2, **kwargs):
        """Interval deletion with chunk management.
        
        Arguments:
            prefix -- table-name prefix, e.g. "quote" or an indicator name
            period -- qp.min1 etc; may be empty
            name -- e.g. BOVESPA_VISTA_PETR4; may be empty
            dt1, dt2 -- either int (timestamp) or datetime
            
        Keyword arguments:
            flagSimulation=False -- if True, only logs what would be
                swept; nothing is deleted
            
        If isempty(name), sweeps all tables.
        If isempty(period), sweeps all periods.
            
        """
        flagSimulation = kwargs.get("flagSimulation", False)
        with self.R:  # serialize all database access
            self.AssertOpen()
            ts1, ts2 = misc.dt2ts(dt1), misc.dt2ts(dt2)

            # Expand empty name/period into full sweeps
            names = self.H.getNode("/stock").read(field="name") if misc.isempty(name)\
                    else [name]
            periods = range(qp.secondly, qp.yearly+1) if misc.isempty(period) \
                      else [period]

            # names is outer loop, deletes stock by stock
            n = 0 # Number of deleted
            for name_ in names:
                for period_ in periods:
                    self.logger.info("DDD To delete prefix=%s; period=%s; name=%s; dt1=%s; dt2=%s" % (prefix, qp.data[period_]["english"], name_, misc.dt2str(dt1), misc.dt2str(dt2)))
                    if period_ == qp.secondly:
                        # One table per day: clip [ts1, ts2] per day chunk
                        rm = secondly_ReadingMap(ts1, ts2, False)
                        for m in rm:
                            node = self.__td_GetNode(
                                i_GetPath(m["groupName"], prefix, period_, name_))
                            if node is not None and not flagSimulation: # Of course node does not need to exist
                                n += self.__td_DeleteInterval1(node, m["tsMin"], m["tsMax"])
                    elif period_ < qp.daily:
                        # Intraday: one table per month group
                        rm = i_ReadingMap(period_, ts1, ts2, False)
                        for m in rm:
                            node = self.__td_GetNode(
                                i_GetPath(m["groupName"], prefix, period_, name_))
                            if node is not None and not flagSimulation:
                                n += self.__td_DeleteInterval1(node, m["tsMin"], m["tsMax"])
                    else:
                        # Daily or longer: single table per stock
                        node = self.__td_GetNode(d_GetPath(prefix, period_, name_))
                        if node is not None and not flagSimulation:
                            n += self.__td_DeleteInterval1(node, ts1, ts2)
            self.logger.debug("DDD ...Overall number of deleted rows: %d" % (n,))


    def __td_DeleteInterval1(self, node, tsMin, tsMax):
        """Deletes records satisfying tsMin <= timestamp <= tsMax
        
        Arguments:
            node -- Node (a quote Table)
            tsMin, tsMax -- only timestamp accepted.
            
        Returns:
            number of deleted records
            
        This routine has same logic as __td_Record1() but does not have the
        calls to append() part: disjoint interval -> no-op; full cover ->
        wipe table; partial overlap -> rebuild keeping only rows outside
        [tsMin, tsMax].
        """
        exMin = self.__quote_GetMinTimestamp(node)
        exMax = self.__quote_GetMaxTimestamp(node)
        lenBefore = len(node)
        lenAfter = lenBefore
        try:
            if exMax is None or tsMin > exMax:
                #  |-Existing-|
                #               |-ToDelete-|
                pass  # nothing to delete
            elif tsMin <= exMin and tsMax >= exMax:
                #    |-Existing-|
                #  |----ToDelete----|
                node = self.__DeleteAllRows(node)
                lenAfter = 0
            else:
                # temporary table required (partial overlap)
                flagMoved = False
                try:
###                    self.logger.debug("DDD %s: to delete min is %s and existing max is %s" % (node.name, repr(misc.ts2dt(tsMin)), repr(misc.ts2dt(exMax))))
###                    self.logger.debug("DDD %s: to delete max is %s and existing min is %s" % (node.name, repr(misc.ts2dt(tsMax)), repr(misc.ts2dt(exMin))))
                    # node becomes the renamed old table; newNode is a fresh
                    # empty table created under the original name
                    (node, newNode) = self.__MoveAndNew(node)
                    flagMoved = True
                    if exMin < tsMin:
                        # |-Existing------...
                        #      |-ToDelete-...
                        node.whereAppend(newNode, "timestamp < %f" % tsMin)
                    if exMax > tsMax:
                        # ...-Existing-|
                        # ...-ToDelete-|
                        node.whereAppend(newNode, "timestamp > %f" % tsMax)
                    lenAfter = len(newNode)
                finally:
                    if flagMoved:
                        # drop the renamed old table
                        node.remove()
        finally:
            self.H.flush()
        n = lenBefore-lenAfter
        self.logger.info("DDD ...deleted %d rows" % n)
        return n


    ### Realtime 
    
    def __secondly_Record(self, prefix, name, Q):
        """Default recording path for secondly (realtime) quotes.

        Records into the table of the day that Q's first timestamp belongs
        to, creating the table if needed.
        """
        if len(Q) == 0:
            return
        with self.R:
            self.AssertOpen()
            path = i_GetPath(secondly_GroupName(Q.timestamp), prefix,
                             qp.secondly, name)
            node = self.__td_GetNode(path, Q, True)
            self.__secondly_Record1(node, Q)
            self.logger.debug("$$$ %s: recorded %d Secondy quotes" % (name, len(Q)))

    def __secondly_Record1(self, node, Q):
        """Similar to __td_Record1 but  different assumptions.
        
        Assumptions:
          - Q has always more recent or at least as up-to-date as existing
          - Overlap will be of 1 quote only and only first quote of Q
        
        Arguments:
            node -- Table object
            Q -- timedata.Quotes object

        The last existing row is treated as still forming: its close, high,
        low, volume and numTrades are merged with Q's first quote; any
        remaining new quotes are appended after it.
        """
        if Q is None or len(Q) == 0:
            return
        X = Q.Pack()  # packed, appendable representation of Q
        numEx = len(node)
        exMax = node[numEx-1]["timestamp"] if numEx > 0 \
                else None # Maximum existing timestamp
        tsMin = Q.timestamp[0]
        try:
            if exMax is None or tsMin > exMax:
                #  |-Existing-|
                #               |-New-|
                node.append(X)
                self.logger.debug("New %d realtime quotes" % len(X))
            else:
                if tsMin < exMax:
                    # Violates the 1-quote-overlap assumption: drop new
                    # quotes older than the existing maximum.
                    self.logger.warning("New min is %s whereas existing max is %s. At least one quote will be ignored" % (misc.ts2dt(tsMin), misc.ts2dt(exMax)))
                    ts1 = misc.BSearchCeil(Q.timestamp, exMax)
                    X = X[ts1:]
                    # NOTE(review): only X is trimmed here; the merge below
                    # still reads Q.close[0] etc., which may no longer line
                    # up with X[0] after trimming -- confirm intended.

                # |-Existing-|
                #            |-New-| (1 row overlap)
                
                # Updates last existing
                n = numEx-1
                self.logger.debug("old close: %.2f; new close: %.2f" % (node[n]["close"], Q.close[0]))
                # NOTE(review): in PyTables, node[n] returns a row *copy*;
                # these item assignments may not be written back to the
                # table -- verify (modifyRows may be required).
                node[n]["close"] = Q.close[0]
                node[n]["high"] = max(node[n]["high"], Q.high[0])
                node[n]["low"] = min(node[n]["low"], Q.low[0])
                node[n]["volume"] += Q.volume[0]
                node[n]["numTrades"] += Q.numTrades[0]
                self.logger.debug("Updated realtime quote")
                if len(X) > 1:
                    node.append(X[1:])
                self.logger.debug("Added some more %d quotes" % (len(X)-1))
                
                    
        finally:
            node.flush()
            self.H.flush()


################################################################################
## Errors

class DatabaseExists(Exception):
    """Raised when trying to create a database that already exists."""

class InvalidArgument(Exception):
    """Raised when an argument value is not acceptable."""

class KeyNotFound(Exception):
    """Raised when a lookup key is absent."""

class StringTooBig(Exception):
    """Raised when a serialized string exceeds its table column size."""

class ProxyError(Exception):
    """Exception raised at failing to get database proxy."""

class BadArgument(Exception):
    """Raised when a function is called with missing or invalid arguments."""


################################################################################
## DB SERVER

class DBServer(threads.ServerThread):
    """
    Database server - starts a Pyro4 daemon to serve na.vala.db in a thread

    The server is in "multiplex" mode, meaning that method calls are processed
    sequentially.
    
    Note that it is the same db used locally by other modules.
    
    References:
      http://stackoverflow.com/questions/8508112/requestlooploopcondition-doesnt-release-even-after-loopcondition-is-false
    """
    
    def __init__(self):
        threads.ServerThread.__init__(self, name="DBServer")

    def run_(self):
        """Thread body: registers the database object with the Pyro name
        server and blocks in the request loop until Exit() is called."""
        Pyro4.config.SERVERTYPE = "multiplex"
        #  "We need to set either a socket communication timeout,
        #   or use the select based server. Otherwise the daemon requestLoop
        #   will block indefinitely and is never able to evaluate the loopCondition.
        #  " -- http://stackoverflow.com/questions/8508112/requestlooploopcondition-doesnt-release-even-after-loopcondition-is-false
        Pyro4.config.COMMTIMEOUT=60.5
        db = na.vala.db  # the same db instance used locally
        # make a Pyro daemon
        self.pyroDaemon = Pyro4.Daemon(host=misc.AskUser())
        # find the name server
        logging.info("Locating the name server...")
        ns = Pyro4.locateNS()
        logging.info("Ready.")
        # register the custom object as a Pyro object
        uri = self.pyroDaemon.register(db)
        # register the object with a name in the name server
        ns.register("navala.Database", uri)
        
        self.SetStarted()
        
        # start the event loop of the server to wait for calls
        self.flagKeepRunning = True
        self.pyroDaemon.requestLoop(loopCondition=self.GetKeepRunning)
        logging.debug("run_() from DBServer reached its END")
        
    def GetKeepRunning(self):
        """Loop condition polled by the Pyro request loop."""
        return self.flagKeepRunning
        
    def Exit(self):
        """Asks the request loop to stop.

        NOTE: this only clears the flag; the loop re-evaluates its
        condition between requests / after the COMMTIMEOUT tick, so actual
        shutdown may lag (see COMMTIMEOUT in run_).
        """
        logging.debug("About to close DB server")
        self.flagKeepRunning = False
        logging.debug("Closed DB server OK")




################################################################################
## Auxiliary - agent

def SerializeAgentData(data):
    """
    Pickle's data.

    Raises StringTooBig if the serialized string will not fit into the
    "data" column of the Agent table.
    """
    serialized = pkl.dumps(data)
    maxSize = Agent().columns["data"].itemsize
    if len(serialized) > maxSize:
        raise StringTooBig("Data serialization generated string too big")
    return serialized

def UnserializeAgentData(ss):
    """Unpickle's data previously produced by SerializeAgentData().

    WARNING: unpickling executes arbitrary code if *ss* is untrusted;
    only feed it strings read back from our own database.
    """
    # Fixed: previous version bound the result to a local named "dict",
    # shadowing the builtin.
    return pkl.loads(ss)


def GetDatabaseProxy():
    """Returns a Pyro4 proxy to the "navala.Database" remote object.

    Raises:
        ProxyError -- if proxy validation fails. NOTE(review): the
            validation call is currently commented out, so the except
            branch is dead and this never actually raises.
    """
    db = Pyro4.Proxy("PYRONAME:navala.Database")
    try:
        pass
        #db.AssertOpen()
    except Exception as E:
        # Fixed: used E.message, which is deprecated since Python 2.6 and
        # absent in Python 3 -- it would have raised AttributeError had the
        # validation ever been re-enabled.
        raise ProxyError("Could not get a valid proxy to the Database object "
                         "(original message: '%s')" % str(E))
    return db


################################################################################
## Auxiliary routines to find the way around with Quote tables


def d_GetPath(prefix, period, name):
    """Dumb routine to get node name, doesn't check whether period >= daily."""
    varname = qp.data[period]["varname"]
    return "/%s/%s__%s" % (varname, prefix, name)

def i_GetPath(groupName, prefix, period, name):
    """Dumb routine to get node name, doesn't check whether period is intraday."""
    parts = (qp.data[period]["varname"], groupName, prefix, name)
    return "/%s/_%s/%s__%s" % parts

def i_ReadingMap(period, ts1, ts2, flagCanonical=True):
    """Returns [(groupName, tsMin, tsMax), ...]

    Arguments:
        period -- intraday period; its "mpt" (Months Per Table) entry in
                  qp.data determines the group (chunk) width
        ts1, ts2 -- Timestamps; boundaries of a timestamp vector to be written
                    into or read from the database.
        flagCanonical -- =True.
            False: result[0]["tsMin"] = ts1
                   result[end]["tsMax"] = ts2
            True: result[0]["tsMin"] and result[end]["tsMax"] are expanded to
                  their respective minimum/maximum possible

    Meaning of result:
        groupName -- Name of a group (node) in the database, e.g. "201201"
        tsMin -- Minimum timestamp that can be included in the interval.
        tsMax -- Maximum timestamp that can be included in the interval."""
    mpt = qp.data[period]["mpt"] # Months Per Table
    if mpt < 0:
        raise RuntimeError("Months Per Table is < 0, probably using i_ReadingMap() with period >= Daily")
    delta = rd.relativedelta(months=mpt)
    dt1 = misc.ts2dt(ts1)
    # Note that dates lower than 2012 won't work here.
    # monthCanonical is the 1-based month count since Jan/2012, rounded
    # down to the nearest mpt boundary.
    monthCanonical = ((dt1.year-2012)*12+(dt1.month-1))//mpt*mpt+1
    # BUG FIX: year was previously monthCanonical//12+2012, which is off by
    # one whenever monthCanonical is a multiple of 12 (a December boundary):
    # e.g. Dec/2012 (monthCanonical == 12) was mapped to year 2013. The
    # (monthCanonical-1)//12 form matches the month computation below.
    dtCanonical = dt1.replace(year=(monthCanonical-1)//12+2012,
        month=(monthCanonical-1)%12+1, day=1, hour=0, minute=0,
        second=0, microsecond=0)
    tsMin = misc.dt2ts(dtCanonical)
    i = 0
    flagExit = False
    # Pre-allocated result; hard cap of 100 chunks
    r = np.zeros((100,), dtype=[('groupName', '|S10'),
                                ('tsMin', 'i8'), ('tsMax', 'i8')])
    while not flagExit:
        dtCanonicalNext = dtCanonical+delta
        tsMinNext = misc.dt2ts(dtCanonicalNext)
        tsMax = tsMinNext-60 # TODO: Could be 1 second instead of 60
        if tsMax >= ts2:
            flagExit = True
        r[i] = (dtCanonical.strftime("%Y%m"),
                ts1 if i == 0 and not flagCanonical else tsMin,
                ts2 if flagExit and not flagCanonical else tsMax)
        if flagExit:
            continue  # while-condition terminates the loop
        dtCanonical = dtCanonicalNext
        tsMin = tsMinNext
        i = i+1
        if i >= 100:
            raise RuntimeError("Interval too big for reading map, try shortening interval")
    return np.resize(r, (i+1, ))


def secondly_ReadingMap(ts1, ts2, flagCanonical=True):
    """Realtime reading map. Returns [(groupName, tsMin, tsMax), ...]

    Arguments:
        ts1, ts2 -- Timestamps; boundaries of a timestamp vector to be written
                    into or read from the database.
        flagCanonical -- =True.
            False: result[0]["tsMin"] = ts1
                   result[end]["tsMax"] = ts2
            True: result[0]["tsMin"] and result[end]["tsMax"] are expanded
                  to whole-day boundaries

    Meaning of result:
        groupName -- In this case, YYYYMMDD
        tsMin -- Minimum timestamp that can be included in the interval.
        tsMax -- Maximum timestamp that can be included in the interval."""
    delta = datetime.timedelta(days=1)
    dt1 = misc.ts2dt(ts1)
    dtCanonical = misc.BeginningOfDay(dt1)
    tsMin = misc.dt2ts(dtCanonical)
    i, flagExit = 0, False
    # Pre-allocated result; hard cap of 100 chunks (days)
    r = np.zeros((100,), dtype=[('groupName', '|S10'),
                                ('tsMin', 'i8'), ('tsMax', 'i8')])
    while not flagExit:
        dtCanonicalNext = dtCanonical+delta
        tsMinNext = misc.dt2ts(dtCanonicalNext)
        tsMax = tsMinNext-1  # last second of the current day
        if tsMax > ts2:
            flagExit = True
        r[i] = (dtCanonical.strftime("%Y%m%d"),
            ts1 if i == 0 and not flagCanonical else tsMin,
            ts2 if flagExit and not flagCanonical else tsMax)
        if flagExit:
            continue  # while-condition terminates the loop
        # BUG FIX: this statement previously assigned to "tdMin" (a typo),
        # so tsMin was never advanced and every chunk after the first kept
        # the first day's tsMin. Compare with the equivalent update step
        # in i_ReadingMap().
        dtCanonical, tsMin, i = dtCanonicalNext, tsMinNext, i+1
        if i >= 100:
            raise RuntimeError("Interval too big for reading map, try shortening interval")
    return np.resize(r, (i+1, ))


def WritingMap(rm, tt):
    """Complements a reading map with indexes searched from within tt.

    Arguments:
        rm -- result of i_ReadingMap() or secondly_ReadingMap()
        tt -- timestamp vector

    Returns rm with two extra fields per chunk:
        idx1, idx2 -- Python-slicing boundaries within tt, i.e. the chunk
                      covers tt[idx1:idx2] (idx2 is exclusive).
    """
    n = len(rm)
    r = rf.merge_arrays((rm,
        np.zeros((n,), dtype=[('idx1', 'i8'), ('idx2', 'i8')])), flatten=True)
    for i in range(n):
        # BUG FIX: idx1 was previously r[i-1]["idx2"]+1, but idx2 is an
        # exclusive (Python slicing) boundary -- see i_WritingMap()'s
        # docstring and the Q[idx1:idx2] usage in __td_Record -- so the +1
        # skipped one timestamp at every chunk boundary. Each chunk must
        # start exactly where the previous one ended.
        r[i]["idx1"] = 0 if i == 0 else r[i-1]["idx2"]
        r[i]["idx2"] = len(tt) if i == n-1 \
                       else misc.BSearchFloor(tt, r[i]["tsMax"])+1
    return r


def i_WritingMap(period, tt):
    """Returns [(groupName, tsMin, tsMax, idx1, idx2), ...]

    Convenience composition of i_ReadingMap() and WritingMap().

    Arguments:
        period -- intraday period (qp.min1 etc)
        tt -- Timestamp vector.

    Meaning of result fields:
        groupName, tsMin, tsMax -- see i_ReadingMap()
        idx1, idx2 -- indexes within tt indicating the boundaries of a
                      chunk, using Python slicing convention: the chunk
                      spans tt[idx1:idx2], i.e. from idx1 to idx2-1
    """
    readingMap = i_ReadingMap(period, tt[0], tt[-1])
    return WritingMap(readingMap, tt)


def secondly_GroupName(tt):
    """Produces group name as YYYYMMDD given a timestamp vector.

    Only the first timestamp matters: all realtime quotes in a single
    writing operation belong to same day.
    """
    dt = misc.ts2dt(tt[0])
    return dt.strftime("%Y%m%d")
    
    
#def _i_EmptyMap(n=100):
#    r = np.zeros((100,), dtype=[('groupName', '|S10'),
#                                ('tsMin', 'i8'), ('tsMax', 'i8')])
