import numpy as np
import scipy.special as sp
import scipy.stats as sps
import copy
import math
import sys
import operator
import xml.etree.ElementTree as elet


class Actor(object):
    """
    Actor class.

    An actor in the observed video segment (or a template actor in a model
    specification).
    """

    # string constants (PERSON was previously declared twice; once suffices)
    NULL = "null"
    PERSON = "Person"

    def __init__(self, null=False):
        """
        Initialize an actor.

        null -- when True, build the special "null" actor used as a
        placeholder (e.g. by Time evidence); otherwise build an empty,
        unbound actor template.
        """
        if null:
            # Null actor object
            self.name = Actor.NULL
            self.bound = False
            self.atype = Actor.NULL
            self.starttime = -1  # -1 means "not yet observed"
            self.endtime = -1
            self.ongoing = False
            self.locations = []
            self.regions = []
            self.bounding_boxes = []
        else:
            self.name = ""
            self.bound = False
            self.atype = ""
            self.starttime = -1
            self.endtime = -1
            self.ongoing = False
            self.locations = []
            self.regions = []
            self.bounding_boxes = []
            # used to select distance metric (meters vs widths)
            self.dimensionality = 0

    def make_evidence_from_xml(self, xml):
        """
        xml is a list of "interval" elements, all corresponding to this actor

        returns a list of TrackSegment objects corresponding to the intervals
        in the argument list
        """
        result_list = []
        offset = self.starttime
        for x in xml:
            attributes = x.attrib
            # builtin int() instead of np.int: the np.int alias was
            # deprecated in NumPy 1.20 and removed in 1.24 (same semantics)
            start_time = int(attributes["start_time"])
            end_time = int(attributes["end_time"])
            segment_type = attributes["label"]
            # interval times are absolute; this actor's per-frame vectors
            # start at self.starttime, so shift before slicing
            offset_start_time = start_time - offset
            offset_end_time = end_time - offset
            location_vector = self.locations[offset_start_time:
                                             offset_end_time+1]
            regions_vector = self.regions[offset_start_time:offset_end_time+1]
            bounding_boxes_vector = self.bounding_boxes[offset_start_time:
                                                        offset_end_time+1]
            temp_evidence = TrackSegment(
                actor=self, segtype=segment_type, starttime=start_time,
                endtime=end_time, locations=location_vector,
                regions=regions_vector, bounding_boxes=bounding_boxes_vector)
            result_list.append(temp_evidence)
        return result_list

    def __eq__(self, other):
        """Actors are equal when name, bound flag and type all match."""
        if not isinstance(other, Actor):
            return False
        return ((self.name == other.name) and (self.bound == other.bound) and
                (self.atype == other.atype))

    def __ne__(self, other):
        if not isinstance(other, Actor):
            return True
        return not (self == other)

    def __str__(self):
        return ("**Actor** [name:%s,bound:%s,starttime:%s,endtime:%s]" %
                (self.name, self.bound, self.starttime, self.endtime))


class Evidence(object):
    """
    Generic evidence object; also contains segment label constants.

    Segment Labels:

    MOVE_FAST:
    label for a track segment in which the actor is moving faster than normal

    WAKE:
    label for an evidence object used to alert local models to the passage of
    time

    BASE_SEG:
    basic track segment (chunked to a default window length, not according to
    discovered endpoints)

    RAW_SEG:
    unprocessed track segment (in the original output format)
    """

    MOVE = "Move"
    MOVE_FAST = "MoveFast"
    MOVE_SLOW = "MoveSlow"
    NOT_MOVE = "NotMove"
    WAKE = "Wake"
    BASE_SEG = "BasicSegment"
    RAW_SEG = "RawSegment"
    POS_STABLE = "PositionStable"

    def __init__(self, **kwargs):
        """
        Recognized keyword arguments: actor, segtype, starttime, endtime.
        Unrecognized keywords are silently ignored.
        """
        self.actor = None
        self.segtype = ""
        self.starttime = -1
        self.endtime = -1
        for elt in ("actor", "segtype", "starttime", "endtime"):
            if elt in kwargs:  # direct membership test, no .keys() needed
                setattr(self, elt, kwargs[elt])
        # remember the originally observed boundaries; starttime/endtime may
        # be shifted later by boundary search (see Model.__rebind)
        self.oStartTime = self.starttime
        self.oEndTime = self.endtime
        self.bound = False
        self.willBind = True
        self.endsNode = True

    def __eq__(self, other):
        """Equal when same concrete type, same actor name, same segtype."""
        if not self.tC(other):
            return False
        if (self.actor is None) and (other.actor is None):
            return self.segtype == other.segtype
        # NOTE(review): raises AttributeError if exactly one actor is None --
        # callers appear never to mix bound/None actors; confirm
        return ((self.actor.name == other.actor.name) and
                (self.segtype == other.segtype))

    def __ne__(self, other):
        return not (self == other)

    def tC(self, other):
        """Symmetric type check: True when self and other are instances of
        each other's types (i.e. the same concrete class)."""
        return (isinstance(self, type(other)) and
                isinstance(other, type(self)))

    def setBound(self):
        """Mark this evidence as bound into a hypothesis."""
        self.bound = True

    @staticmethod
    def matches(ev, template):
        """
        checks whether it's valid to replace template with ev in a hypothesis
        """
        if template.segtype != ev.segtype:
            # a generic MOVE template also accepts the fast/slow variants
            if template.segtype != Evidence.MOVE:
                return False
            if ev.segtype not in (Evidence.MOVE_FAST, Evidence.MOVE_SLOW):
                return False
        # only unbound templates may be replaced, and a bound template actor
        # must agree with the incoming evidence's actor
        if (not template.actor.bound) or (template.actor == ev.actor):
            if not template.bound:
                return True
        return False

    def kStr(self):
        """Canonical key string (uses the original, pre-search boundaries)."""
        return (("**Evidence**:[actor:%s,segtype:%s,sTime:%s,eTime:%s,bound:%s"
                 "]") % (self.actor.name, self.segtype, self.oStartTime,
                         self.oEndTime, self.bound))

    def __str__(self):
        return ("**Evidence**:[actor:%s, segtype:%s, sTime:%s, eTime:%s,"
                "bound:%s]" % (self.actor.name, self.segtype, self.starttime,
                               self.endtime, self.bound))

    class Error(Exception):
        """Raised for evidence-related usage errors."""
        pass


class Time(Evidence):
    """
    evidence posted to blackboard by a knowledge source which keeps track of
    time passage

    used by models to adjust their scores as the likelihood of a predicted
    event happening changes over time
    """

    def __init__(self, **kwargs):
        # stray trailing comma removed from the original super() call
        super(Time, self).__init__(**kwargs)
        self.actor = Actor(True)  # null actor

    def __eq__(self, other):
        """Time evidence is equal when start time and segment type match."""
        if not self.tC(other):
            return False
        return ((self.starttime == other.starttime) and
                (self.segtype == other.segtype))

    def __ne__(self, other):
        return not (self == other)

    def setBound(self):
        """Time evidence is never consumed by binding; deliberately a no-op."""
        pass


class TrackSegment(Evidence):
    """
    an actor's locations during some time window (used as evidence to make
    assertions about his movement patterns)

    also contains functions for summarizing some track attributes (e.g. speed)
    """

    def __init__(self, **kwargs):
        """
        In addition to the Evidence keywords, recognizes: locations, regions,
        bounding_boxes.
        """
        super(TrackSegment, self).__init__(**kwargs)
        # seed the guarded attributes through __dict__ so the write-once
        # check in __setattr__ doesn't fire during construction
        self.__dict__["locations"] = None
        self.__dict__["regions"] = None
        self.__dict__["bounding_boxes"] = None
        for elt in ("locations", "regions", "bounding_boxes"):
            if elt in kwargs:
                setattr(self, elt, kwargs[elt])

    def __setattr__(self, name, value):
        # locations/regions/bounding_boxes may only be assigned directly
        # once; afterwards they must be updated through set_boundaries() so
        # they stay consistent with the time boundaries
        if name in ("locations", "regions", "bounding_boxes"):
            if self.__dict__[name] is not None:
                raise self.Error(
                    "Attempting to directly access %s attribute -- use"
                    " set_boundaries() instead" % name)
        self.__dict__[name] = value

    def set_boundaries(self, start, end):
        """
        Changes this object's start and end times, and modifies its locations,
        regions and bounding_boxes attributes to reflect the updated time
        boundaries.  Used to ensure all attributes are current

        start and end are the indices used to slice actor's corresponding
        attributes, so this function behaves the same way (with respect to
        inclusion/exclusion of endpoints) as python's slice functionality

        NOTE(review): the start/end *times* themselves are set by the caller
        (see Model.__rebind); this method only refreshes the sliced vectors.
        """
        # write through __dict__ to bypass __setattr__'s write-once guard
        self.__dict__["locations"] = self.actor.locations[start:end]
        self.__dict__["regions"] = self.actor.regions[start:end]
        self.__dict__["bounding_boxes"] = self.actor.bounding_boxes[start:end]

    def __eq__(self, other):
        if not self.tC(other):
            return False
        return ((self.actor.name == other.actor.name) and
                (self.locations == other.locations) and
                (self.segtype == other.segtype))

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        return (("**TrackSegment**:[actor:%s,segtype:%s,sTime:%s,eTime:%s,"
                 "bound:%s]") % (self.actor.name, self.segtype, self.starttime,
                                 self.endtime, self.bound))

    def calculate_speed(self):
        """Dispatch on the actor's dimensionality to pick a speed metric."""
        if self.actor.dimensionality == 3:
            return self.calculate_speed_3d()
        if self.actor.dimensionality == 2:
            return self.calculate_speed_2d()
        raise Evidence.Error(
            "Calculate Speed was not implemented for this type of track"
            "evidence (or actor object was incorrectly instantiated)")

    def calculate_speed_2d(self):
        """
        Calculates the actor's speed across this interval, but in units of
        actor-widths, since pixel sizes for different videos could correspond
        to vastly different distances in meters
        """
        return self.calculate_speed_3d() / self.actor.width

    def calculate_speed_3d(self):
        """Net (straight-line) displacement between the first and last
        locations, divided by elapsed time (frames / Model.FR seconds)."""
        locations = self.locations
        delta_time = float(len(locations)) / Model.FR
        x_0 = float(locations[0][0])
        z_0 = float(locations[0][1])
        x_t = float(locations[-1][0])
        z_t = float(locations[-1][1])
        delta_x = x_t - x_0
        delta_z = z_t - z_0
        delta_distance = math.sqrt(delta_x*delta_x + delta_z*delta_z)
        return delta_distance/delta_time

    def calculate_direction(self):
        """Angle (radians, via arctan) of the actor's net movement vector;
        returns 0. for a stationary track."""
        locations = self.locations
        x_0 = locations[0][0]
        x_t = locations[-1][0]
        z_0 = locations[0][1]
        z_t = locations[-1][1]
        delta_x = float(x_t - x_0)
        delta_z = float(z_t - z_0)
        # the distance ratio we want is opposite over adjacent for the
        # triangle formed by the actor's movement, assuming a bird's eye view
        # of the scene
        #
        # in our coordinate system, x is the horizontal axis and z is the
        # vertical, so this ratio is delta_x / delta_z
        if delta_x == 0:
            # make sure we don't divide by zero
            if delta_z == 0:
                # this isn't correct since there's no vector if the person
                # doesn't move, but it needs to return something and throwing
                # an exception here is probably a bad idea since this will get
                # called in so many places
                return 0.
            else:
                # here the tangent function is defined, but we need to pass
                # it float("inf") instead of actually dividing
                distance_ratio = float("inf") if delta_z > 0 else float("-inf")
        else:
            distance_ratio = delta_z/delta_x
        angle = np.arctan(distance_ratio)
        return angle

    def calculate_distance_from_center(self):
        """Average distance of the track's points from the center of the
        track's bounding box, in actor-width units."""
        locations = self.locations
        n_frames = float(len(locations))
        xs = [loc[0] for loc in locations]
        ys = [loc[1] for loc in locations]
        x_min, x_max = min(xs), max(xs)
        y_min, y_max = min(ys), max(ys)

        x_center = x_min + (x_max - x_min)/2.
        y_center = y_min + (y_max - y_min)/2.

        distance_sum = 0.
        for x, y in zip(xs, ys):
            x_distance = np.abs(x_center-x)
            y_distance = np.abs(y_center-y)
            distance_sum += np.sqrt(x_distance*x_distance +
                                    y_distance*y_distance)

        avg_distance = distance_sum/n_frames
        return avg_distance/self.actor.width

    def calculate_distance_to_stationary(self, other, window=5):
        """
        Distance decreasing from stationary between track1 and track2 over the
        interval.
        That is, track1 is "moving-towards" track2.

        window -- number of frames to consider when finding average positions
        in time. (default = 5 frames)

        Returns a list of per-step distance changes (negative values mean
        self's smoothed position is approaching other's past smoothed
        position), or scalar 0 when the tracks are too short for any window.
        """
        assert (self.starttime == other.starttime and
                self.endtime == other.endtime)
        assert len(self.locations) == len(other.locations)

        x1 = [p[0] for p in self.locations]
        z1 = [p[1] for p in self.locations]
        x2 = [p[0] for p in other.locations]
        z2 = [p[1] for p in other.locations]

        # shrink the window until two of them fit in the track; floor
        # division keeps window an int (plain / becomes float division under
        # Python 3, which would break range() below)
        while len(x1) < 2*window:
            window //= 2
        if window == 0:
            # NOTE(review): scalar 0 here vs a list below -- both falsy, so
            # truthiness-based callers are unaffected
            return 0

        dist1 = []
        dist2 = []
        for i in range(window, len(x1)-window+1):
            # average track1 position over the previous `window` frames
            px1_1 = x1[i-window:i]
            pz1_1 = z1[i-window:i]
            p11 = (sum(px1_1) / len(px1_1), sum(pz1_1) / len(pz1_1))

            # average track1 position over the next `window` frames
            px1_2 = x1[i:i+window]
            pz1_2 = z1[i:i+window]
            p12 = (sum(px1_2) / len(px1_2), sum(pz1_2) / len(pz1_2))

            # average track2 position over the previous `window` frames
            px2_1 = x2[i-window:i]
            pz2_1 = z2[i-window:i]
            p21 = (sum(px2_1) / len(px2_1), sum(pz2_1) / len(pz2_1))

            dist1.append(
                np.sqrt(
                    np.square(p11[0]-p21[0]) + np.square(p11[1]-p21[1])))
            dist2.append(
                np.sqrt(
                    np.square(p12[0]-p21[0]) + np.square(p12[1]-p21[1])))
        return [dist2[j] - dist1[j] for j in range(len(dist1))]


class Node(object):
    """
    a node in a model

    nodes contain lists of subjects, objects and indirect objects which refer
    to that node's activity (node activities are sub-activities at the model
    level)

    they also contain vectors of evidence which are initially used to indicate
    what types of evidence a node describes and later to store observed
    evidence (once a binding for a node has been found)
    """
    # class-level counter used by getID to hand out unique node IDs
    i = 0

    def __init__(self):
        self.name = ""
        self.subjects = []
        self.objects = []
        self.ongoing = True
        self.lfunction = lambda x: [.5]
        self.pfunction = lambda x: [.5]
        self.parents = []
        self.evvecs = None
        self.likelihood = None
        self.ltype = "likelihood"
        self.starttime = -1
        self.endtime = -1
        self.ind_objects = []
        self.prior = np.log(.01)
        # log of 1 so doesn't affect probabilities if undefined
        self.eDen = lambda x: 0
        self.atEnd = True

    def __str__(self):
        # the original joined a list of *lambda objects* for the parents
        # field, which raised TypeError whenever parents was non-empty;
        # format the (name, id) pairs directly instead
        return ("**node** [name: %s, id: %s, subjects: [%s] objects: [%s]"
                "parents: [%s]" % (self.name, self.i,
                                   ",".join([x.name for x in self.subjects]),
                                   ",".join([x.name for x in self.objects]),
                                   ",".join(["(%s,%s)" % (x.name, x.i)
                                             for x in self.parents])))

    def getID(self):
        """
        this function should query the blackboard to see if a node with these
        parameters already exists. if one does, it should get that node's ID;
        otherwise it should return a new ID
        """
        self.i = Node.i
        Node.i += 1


class TimeNode(Node):
    """
    Node variant that tracks time-based likelihoods.

    Adds lagfunction and timeDensity on top of the regular Node attributes.
    """

    def __init__(self):
        # initialize the full Node attribute set first -- the original
        # skipped this, leaving self.lfunction / self.evvecs (used by
        # query()) undefined; Node.__init__ sets every attribute the old
        # code set, with identical values, plus the ones query() needs
        super(TimeNode, self).__init__()
        self.lagfunction = lambda x: [.5]
        self.timeDensity = lambda x, y: [.5]

    def query(self, ctime=None):
        """Evaluate this node's likelihood function over its evidence,
        optionally at the given current time ctime."""
        if ctime is None:
            return self.lfunction(self.evvecs)
        return self.lfunction(self.evvecs, ctime)


class Model(object):
    """
    This class contains:

    Our representation of a given model (in terms of node structure and
    conditional distributions for evidence)

    Our current list of hypotheses for that model (based on the evidence we've
    observed to date)

    Functions to generate new hypotheses based on incoming evidence, to
    calculate the joint conditional likelihood of a hypothesis's evidence, and
    to generate XML output for the blackboard
    """
    FR = 29.97
    # movie framerate, used to convert per-frame actor locations into speed
    # values with more useful units

    def __init__(self):
        self.name = ""
        self.nodes = []
        self.hypotheses = []
        self.subscriptions = []
        self.topNode = None
        self.timeNodes = dict()
        self.independent = False
        self.hypDict = dict()
        self.prior = .5
        self.hillStep = 45  # Distance in frames to shift a boundary at first
        self.trackSegments = dict()

    def add_evidence(self, ev):
        """
        This function takes a newly-received piece of evidence and generates
        new hypotheses from both the model's template and existing hypotheses

        ev -- the incoming Evidence object; its actor's observed time window
        is widened here as a side effect
        """
        # update the start/end times for this evidence's actor (if necessary)
        if ((ev.starttime != -1) and ((ev.actor.starttime == -1)
                                      or (ev.starttime < ev.actor.starttime))):
            ev.actor.starttime = ev.starttime
        if ev.endtime > ev.actor.endtime:
            ev.actor.endtime = ev.endtime

        if ev.segtype == Evidence.BASE_SEG:
            # keep this actor's basic segments in start-time order;
            # setdefault replaces the original try/except-KeyError dance
            tList = self.trackSegments.setdefault(ev.actor.name, [])
            tList.append(ev)
            tList.sort(key=lambda x: x.starttime)

        hypVec = []
        tHyps = self.__add_evidence_subordinate(self.nodes, ev)
        if tHyps is not None:
            hypVec.extend(tHyps)
        for hyp in self.hypotheses:
            tHyps = self.__add_evidence_subordinate(hyp.nodes, ev)
            if tHyps is not None:
                hypVec.extend(tHyps)
                if ev.segtype != Evidence.WAKE:
                    # don't put current hyp back in on wake evidence (if the
                    # wake matches this hyp) since it will just duplicate the
                    # hypothesis
                    hypVec.append(hyp)
            else:
                hypVec.append(hyp)
        self.hypotheses = hypVec

    def __add_evidence_subordinate(self, nodelist, ev):
        """
        This function compares a piece of evidence to a hypothesis and tries to
        match the evidence's type (e.g. a MovingFast segment) with the type
        predicted by each of the  hypothesis's nodes

        Upon finding a match, this function generates a new hypothesis with the
        new evidence's actor bound in place of the node's placeholder actor
        across the model (it does this for every match the new evidence
        generates)

        If the evidence passed in refers to an already bound actor (in case the
        model tries to predict multiple movement patterns by a person at
        different time points), the model binds the evidence vector passed in
        where appropriate

        nodelist -- the node templates to match against (either the model's
        template nodes or an existing hypothesis's nodes)
        ev -- the newly received evidence object

        Returns a list of newly generated hypothesis objects, or None when no
        node template in nodelist matched the evidence
        """

        # repHyps stays None when nothing matches, so the caller knows to
        # keep the existing hypothesis unchanged
        repHyps = None
        found = False
        repActors = []
        # repActors is the vector of valid actor bindings given the type of
        # evidence we've received
        repEvs = []  # repEvs is the vector of evidence bindings
        bindActor = True
        for node in nodelist:
            # looking for nodes whose evidence templates include the type we've
            # received
            for cev in node.evvecs:
                # evidence objects have a segment type (e.g. MovingFast); match
                # the template evidence labels against the type of evidence
                # we've received
                if Evidence.matches(ev, cev):
                    # found a node which uses this type of evidence

                    # if the evidence we're replacing is associated with a
                    # placeholder actor, we want to bind that placeholder to the
                    # new evidence's actor (do this for every node that
                    # references this placeholder actor)
                    #
                    # if it's already associated with an observed actor, it
                    # needs to be the actor that the new evidence references for
                    # the new evidence to match the template
                    repActors.append(cev. actor)
                    repEvs.append(cev)
                    if cev.actor.bound:
                        bindActor = False
                    found = True

        # replace the template evidence with the newly observed evidence
        # throughout the model, and bind the actor if it's currently a
        # placeholder
        if found:
            repHyps = []
            # repHyps is the list of all the new hypotheses we can generate by
            # binding the evidence we've received (e.g. a MovingFast segment
            # generates both Chase(A,*) and Chase(*,A))
            for aind, repActor in enumerate(repActors):
                # iterating over all the valid bindings we found
                repEv = repEvs[aind]
                # NOTE(review): hypothesis() is not defined in this chunk --
                # presumably the hypothesis container class defined elsewhere
                # in this module; confirm
                repHyp = hypothesis()
                repHyp.name = self.name
                repNodes = []
                nNodes = []
                rparents = []
                repHyp.topNode = self.topNode
                # nst/net accumulate the hypothesis-wide earliest start and
                # latest end times across all nodes
                nst = float("inf")
                net = -float("inf")
                for node in nodelist:
                    # bind the new evidence (and possibly its actor) in every
                    # node that uses the template we're replacing
                    replaced = False
                    repNode = copy.copy(node)
                    if bindActor:
                        # don't need to replace actor if we're just binding new
                        # evidence to an existing actor, otherwise replace any
                        # references to the placeholder actor with the received
                        # evidence's actor
                        repSubs = repNode.subjects
                        repObs = repNode.objects
                        for i, act in enumerate(node.subjects):
                            if act == repActor:
                                replaced = True
                                repSubs = (repSubs[:i] + [ev.actor] +
                                           repSubs[i+1:])
                        for i, act in enumerate(node.objects):
                            if act == repActor:
                                replaced = True
                                repObs = repObs[:i] + [ev.actor] + repObs[i+1:]
                        repNode.subjects = repSubs
                        repNode.objects = repObs
                    nEv = repNode.evvecs
                    for i, tev in enumerate(node.evvecs):
                        # replace the template evidence we're binding to if it
                        # occurs in this node
                        if tev == repEv:
                            ev.setBound()
                            nEv = nEv[:i] + [ev] + nEv[i+1:]
                    repNode.evvecs = nEv
                    st = float("inf")
                    et = -(float("inf"))
                    for tev in nEv:
                        # a node's start and end times are the earliest start
                        # time and latest end time across all of its evidence
                        cst = tev.starttime
                        cet = tev.endtime
                        if (cst != -1):
                            if (cst < st):
                                st = cst
                                if (cst < nst):
                                    # also set the hypothesis's start and end
                                    # times to the earliest and latest values
                                    # across all its nodes
                                    nst = cst
                        if (cet != -1):
                            if (cet > et):
                                et = cet
                                if (cet > net):
                                    net = cet
                    if not math.isinf(st):
                        repNode.starttime = st
                    if not math.isinf(et):
                        repNode.endtime = et
                    if replaced:
                        rparents.append(repNode.i)
                        # need to keep track of which nodes we've updated so we
                        # can update their children's lists of parents (since
                        # each binding generates a new node)
                        repNode.getID()
                        # getID generates a new ID value (should eventually
                        # verify this node's uniqueness with the blackboard)
                        nNodes.append(repNode)
                    repNodes.append(repNode)
                final_nodes = []
                repTimeNodes = dict()
                for node in repNodes:
                    # if any of this node's parents were updated, we need to
                    # update its list of parents so it refers to the updated
                    # parent nodes
                    nNode = copy.copy(node)
                    nparents = copy.copy(node.parents)
                    for j, pnode in enumerate(rparents):
                        for i, parent in enumerate(node.parents):
                            if pnode == parent.i:
                                nparents = (nparents[:i] + [nNodes[j]] +
                                            nparents[i+1:])
                    nNode.parents = nparents
                    final_nodes.append(nNode)
                    if isinstance(nNode, TimeNode):
                        repTimeNodes[nNode.name] = nNode

                repHyp.nodes = final_nodes
                repHyp.timeNodes = repTimeNodes
                if not math.isinf(nst):
                    repHyp.starttime = nst
                if not math.isinf(net):
                    repHyp.endtime = net
                if ev.segtype == Evidence.WAKE:
                    # wake evidence always regenerates the hypothesis (no
                    # dedup lookup before appending)
                    hs = str(repHyp)
                    repHyps.append(repHyp)
                    self.hypDict[hs] = 1
                else:
                    try:
                        # keep a dictionary of known hypotheses so we don't
                        # store duplicates (e.g. (A,*) and (*,B) will both match
                        # (A,B))
                        hs = str(repHyp)
                        self.hypDict[hs]
                    except KeyError:
                        repHyps.append(repHyp)
                    self.hypDict[hs] = 1
        return repHyps

    def evaluate_evidence(self):
        """
        This function iterates over all of the model's hypotheses and uses
        searchBounds to find locally optimal segment endpoints for each one,
        then calculates their likelihoods
        """
        nHyps = []
        for hyp in self.hypotheses:
            likelihood, estimated = self.hypothesis_likelihood(hyp)
            # single-argument print() calls: identical output under Python 2,
            # and also valid under Python 3
            print("starting likelihood is %s" % likelihood)
            if math.isnan(likelihood):
                print("skipping node since likelihood cannot be estimated yet")
                continue
            hyp.likelihood = likelihood
            hyp.estimated = estimated
            print("evidence before hill climbing:")
            for node in hyp.nodes:
                for e in node.evvecs:
                    print(e)
            # hill-climb the segment boundaries, then re-score
            temp_hypothesis = self.__search_bounds(hyp)
            print("evidence after hill climbing:")
            for node in temp_hypothesis.nodes:
                for e in node.evvecs:
                    print(e)
            likelihood, estimated = self.hypothesis_likelihood(temp_hypothesis)
            temp_hypothesis.likelihood = likelihood
            temp_hypothesis.estimated = estimated
            nHyps.append(temp_hypothesis)
        self.hypotheses = nHyps

    def __copy_evidence(self, hyp, evDict):
        """
        Copy our evidence with new boundaries back into the hypothesis before
        calculating its likelihood
        """

        for node in hyp.nodes:  # copy the segments with updated boundaries back
            # into the nodes
            evidence_list = node.evvecs
            temp_evidence_list = []
            for ev in evidence_list:
                evidence_string = ev.kStr()
                try:
                    temp_evidence_list.append(evDict[evidence_string])
                except KeyError:
                    # we didn't put this evidence in the dictionary (it's not
                    # bound)
                    temp_evidence_list.append(ev)
            node.evvecs = temp_evidence_list
        return hyp

    def __rebind(self, temp_hypothesis, evidence_dictionary, ev, left_bound,
        right_bound):
        """
        rebind updates the segment boundaries of the evidence we're looking at
        and updates the location vector using the track of the evidence's actor

        temp_hypothesis -- the hypothesis whose evidence is being updated
        evidence_dictionary -- maps canonical evidence strings to the mutable
        evidence objects being rebound
        ev -- the canonical string key of the evidence to update
        left_bound, right_bound -- the new start/end times for the evidence

        Returns (old_evidence, temp_hypothesis): a deep copy of the evidence
        as it was before rebinding, and the hypothesis with the updated
        evidence copied back in
        """
        print "left bound: %s, right bound: %s" % (left_bound, right_bound)
        # keep a pre-rebind copy so the caller can restore the old boundaries
        old_evidence = copy.deepcopy(evidence_dictionary[ev])
        evidence_dictionary[ev].starttime = left_bound
        evidence_dictionary[ev].endtime = right_bound
        caught = False
        if isinstance(evidence_dictionary[ev], TrackSegment):
            actor = evidence_dictionary[ev].actor
            print ("copying track segment from %s to %s, with actor start time"
                   " of %s and %s total actor locations" %
                   (evidence_dictionary[ev].starttime,
                    evidence_dictionary[ev].endtime, actor.starttime,
                    len(actor.locations)))
            # sanity check: the requested segment must fit inside the actor's
            # recorded track
            if ((evidence_dictionary[ev].endtime-actor.starttime+1) >
                len(actor.locations)):
                print ("trying to copy segment whose length exceeds actor's"
                       "track length")
                caught = True
            # print "current locations: %s" % evidence_dictionary[ev].locations
            # convert absolute times into indices into the actor's per-frame
            # vectors (which start at actor.starttime)
            new_start_time = (evidence_dictionary[ev].starttime -
                              actor.starttime)
            new_end_time = (evidence_dictionary[ev].endtime -
                            actor.starttime + 1)
            # n_locations = actor.locations[new_start_time:new_end_time]
            # evidence_dictionary[ev].locations = n_locations
            # print "new locations (",len(n_locations)," frames) are:"
            # print n_locations
            evidence_dictionary[ev].set_boundaries(
                new_start_time, new_end_time)
            # NOTE(review): hard process exit on an out-of-range segment --
            # looks like a debugging aid; confirm it is intended to stay
            if caught:
                sys.exit()
            # print "updated locations: %s" % evidence_dictionary[ev].locations
        temp_hypothesis = self.__copy_evidence(temp_hypothesis,
        evidence_dictionary)
        #self.__copy_evidence(temp_hypothesis,evDict)
        return old_evidence, temp_hypothesis

    def __move_bound(self, evidence_dictionary, temp_hypothesis, evidence,
                     left):
        """
        Tries to move one boundary (start or end) of the given evidence
        segment to a position that improves the hypothesis likelihood.

        evidence is the string key of the canonical evidence node (used as a
        key for the dictionary of evidence objects)

        left indicates whether we're moving a segment's beginning (left)
        boundary or its ending (right) boundary

        Returns (temp_likelihood, delta): the best likelihood found and the
        offset that produced it.  The boundary itself is moved back before
        returning -- only the caller applies the winning move for real.
        """

        def __unbind(oldEv, temp_hypothesis, ev):
            # Restore the pre-move evidence object and propagate it back into
            # the hypothesis nodes.
            evidence_dictionary[ev] = oldEv
            temp_hypothesis = self.__copy_evidence(
                temp_hypothesis, evidence_dictionary)
            return temp_hypothesis

        if left:
            # print "\n\nmoving bound left???\n\n"
            minimum_time = evidence_dictionary[evidence].starttime
            # maximum_time = evidence_dictionary[evidence].endtime - 15
            maximum_time = evidence_dictionary[evidence].endtime
            # For now, force segments to remain at least ~.5 seconds in length
            # so we don't fit to just a couple frames
            delta = -1*self.hillStep
            # print "evidence object:%s" % evidence
            if isinstance(evidence_dictionary[evidence], TrackSegment):
                # Need to handle boundary checking for other types of evidence
                # separately since they'll be stored somewhere else
                minimum_time = evidence_dictionary[evidence].actor.starttime
                # print "minimum_time: %s" % minimum_time
                maximum_time = evidence_dictionary[evidence].endtime
                # For now, force segments to remain at least ~.5 seconds in
                # length so we don't fit to just a couple frames
                # one time boundary is determined by the actor's start/end time
                # (depending on which endpoint we're moving), while the other is
                # determined by the endpoint we're not moving
            bound = evidence_dictionary[evidence].starttime
        else:
            minimum_time = evidence_dictionary[evidence].starttime
            maximum_time = evidence_dictionary[evidence].endtime
            delta = self.hillStep
            if isinstance(evidence_dictionary[evidence], TrackSegment):
                minimum_time = evidence_dictionary[evidence].starttime
                maximum_time = evidence_dictionary[evidence].actor.endtime
            bound = evidence_dictionary[evidence].endtime
        retry = False
        new_direction = False
        # delta = lastMove
        print ("delta before bounding is %s, bound is %s, min time is %s, "
               "max time is %s" % (delta, bound, minimum_time, maximum_time))
        # Clamp the first step so the boundary stays inside [min, max].
        if left and ((bound + delta) < minimum_time):
            delta = minimum_time - bound
        elif (not left) and ((bound + delta) > maximum_time):
            delta = maximum_time - bound
        print "delta after bounding is %s" % delta
        print ("Shifting %s bound for evidence %s by %s" %
               (("left" if left else "right"),
                str(evidence_dictionary[evidence]), delta))
        print ("Old boundaries were (%s, %s)" %
               (evidence_dictionary[evidence].starttime,
                evidence_dictionary[evidence].endtime))
        print "Old likelihood was %s" % temp_hypothesis.likelihood
        bound += delta
        if left:
            left_bound = bound
            right_bound = evidence_dictionary[evidence].endtime
        else:
            left_bound = evidence_dictionary[evidence].starttime
            right_bound = bound
        # Trial move: rebind keeps a copy (old_evidence) so we can undo it.
        old_evidence, temp_hypothesis = self.__rebind(
            temp_hypothesis, evidence_dictionary, evidence, left_bound,
            right_bound)
        # print ("New boundaries are (%s, %s)" %
        #        (evidence_dictionary[evidence].starttime,
        #         evidence_dictionary[evidence].endtime))
        temp_likelihood, ignore = self.hypothesis_likelihood(temp_hypothesis)
        # print "New likelihood is %s" % temp_likelihood
        if temp_likelihood < temp_hypothesis.likelihood:  # Move made the
            # likelihood worse
            retry = True
        # Retry loop: shrink the step, then flip direction, then give up.
        while retry:
            bound -= delta  # Reset boundary
            temp_hypothesis = __unbind(old_evidence, temp_hypothesis, evidence)
            if np.abs(delta) > self.hillStep:  # First check whether we should
                # try a smaller step
                delta -= int(math.floor(self.hillStep/2))
            elif np.abs(delta) > 2:
                delta = delta/2
            elif (not new_direction):  # Already tried small step size in that
                # direction, so try the other direction
                new_direction = True
                if delta < 0:
                    delta = self.hillStep
                else:
                    delta = -1 * self.hillStep  # Just pick default magnitude in
                    # opposite direction
            else:  # Already tried large and small steps in both directions, so
                # don't move this boundary
                retry = False
            if retry:
                # Clamp the retried step against whichever limit applies to
                # the current movement direction.
                if ((left or (not left and new_direction))
                    and ((bound + delta) < minimum_time)):
                    delta = minimum_time - bound
                    bound = minimum_time
                elif ((not left or (left and new_direction))
                      and ((bound + delta) > maximum_time)):
                    delta = maximum_time - bound
                    bound = maximum_time
                else:
                    bound += delta
                if left:
                    left_bound = bound
                    right_bound = evidence_dictionary[evidence].endtime
                else:
                    left_bound = evidence_dictionary[evidence].starttime
                    right_bound = bound
                print ("in retry block with bound of %s and delta of %s, moving"
                       " %s bound" %
                       (bound, delta, ("left" if left else "right")))
                print ("Old boundaries were (%s, %s)" %
                       (evidence_dictionary[evidence].starttime,
                        evidence_dictionary[evidence].endtime))
                print "Old likelihood was %s" % temp_hypothesis.likelihood
                old_evidence, temp_hypothesis = self.__rebind(
                    temp_hypothesis, evidence_dictionary, evidence, left_bound,
                    right_bound)
                print ("New boundaries are (%s, %s)" %
                       (evidence_dictionary[evidence].starttime,
                        evidence_dictionary[evidence].endtime))
                temp_likelihood, ignore = self.hypothesis_likelihood(
                    temp_hypothesis)
                # print "New likelihood is %s" % temp_likelihood
                if temp_likelihood > temp_hypothesis.likelihood:
                    retry = False
        bound -= delta
        # Move boundary back, since we only want to actually change the boundary
        # for the best move
        temp_hypothesis = __unbind(old_evidence, temp_hypothesis, evidence)
        return temp_likelihood, delta

    def __search_bounds(self, hyp, eps=10**(-5)):
        """
        This function takes a hypothesis and searches for locally optimal
        segment boundaries starting from the current segment boundaries.
        It returns the same hypothesis with its evidence segment endpoints in
        the best configuration found (should be close to a local optimum)

        eps is the minimum improvement size we must find in order to keep
        optimizing

        It iterates over nodes in the hypothesis, then the evidence within a
        node, checking to see whether shifting the evidence's boundary improves
        the likelihood

        It calls moveBound in order to determine a more likely location for each
        endpoint of each segment (should one exist)
        """

        temp_hypothesis = copy.copy(hyp)
        evidence_dictionary = dict()
        # Dictionary mapping string representations of evidence nodes to
        # canonical reference for this hypothesis
        for node in temp_hypothesis.nodes:
            evidence_list = node.evvecs
            for ev in evidence_list:
                if not ev.bound:
                    # Only include bound evidence (most expectations aren't
                    # affected by segment length, and the ones that are have a
                    # distribution over the duration in their score/density
                    # function)
                    continue
                label = ev.kStr()
                evidence_dictionary[label] = ev
                # Just overwrite any existing reference, so the canonical one is
                # always the one we find last

        # Now go through models and give them the canonical references for their
        # evidence
        for node in temp_hypothesis.nodes:
            evidence_list = node.evvecs
            temp_evidence_list = []
            for ev in evidence_list:
                label = ev.kStr()
                try:
                    temp_evidence_list.append(evidence_dictionary[label])
                except KeyError:  # we didn't put this evidence in the
                    # dictionary (it's not bound)
                    temp_evidence_list.append(ev)
            node.evvecs = temp_evidence_list

        # Now that we have evidence dict, start looking for local maximum
        improvement_ratio = float("inf")  # Size of improvement from last move
        best_likelihood = temp_hypothesis.likelihood
        # only keep moves that are an improvement over initial config
        best_move = [evidence_dictionary.keys()[0], 0, 0]
        while improvement_ratio > eps:
            i = 0
            for ev in evidence_dictionary.keys():
                # print "i: %s" % i
                i += 1
                # print "moving boundaries for %s" % evidence_dictionary[ev]
                if not evidence_dictionary[ev].bound:
                    continue
                temp_likelihood_left, delta_left = self.__move_bound(
                    evidence_dictionary, temp_hypothesis, ev, True)
                # find candidate move for left endpoint
                temp_likelihood_right, delta_right = self.__move_bound(
                    evidence_dictionary, temp_hypothesis, ev, False)
                # find candidate move for right endpoint
                if temp_likelihood_left > temp_likelihood_right:
                    cur_likelihood = temp_likelihood_left
                    cur_move = [ev, 0, delta_left]
                else:
                    cur_likelihood = temp_likelihood_right
                    cur_move = [ev, 1, delta_right]
                if ((not math.isnan(cur_likelihood)) and (cur_likelihood >
                                                          best_likelihood)):
                    # print ("updating best likelihood to %s and"
                    #        "best move to %s" % (cur_likelihood, cur_move))
                    best_likelihood = cur_likelihood
                    best_move = cur_move
            if best_likelihood > temp_hypothesis.likelihood:
                # Found a move that improves the likelihood
                delta = best_move[2]
                # print ("best move parameters: [%s]" %
                #        (",".join([str(x) for x in best_move])))
                left = (True if (best_move[1] == 0) else False)
                move_evidence = evidence_dictionary[best_move[0]]
                print ("evidence starttime: %s, endtime: %s, delta: %s" %
                       (move_evidence.starttime, move_evidence.endtime, delta))
                if left:
                    bound = move_evidence.starttime + delta
                    # apply the move to our canonical evidence set
                else:
                    bound = move_evidence.endtime + delta
                print "best move found was %s" % best_move
                print ("corresponding evidence is %s" %
                       evidence_dictionary[best_move[0]])
                if left:
                    left_bound = bound
                    right_bound = move_evidence.endtime
                else:
                    left_bound = move_evidence.starttime
                    right_bound = bound
                previous_evidence, temp_hypothesis = self.__rebind(
                    temp_hypothesis, evidence_dictionary, ev, left_bound,
                    right_bound)
                # print ("evidence_dictionary[%s] now contains %s" % (
                #     best_move[0], evidence_dictionary[best_move[0]]))
                # print "updating evidence_dictionary in searchbounds"
                new_likelihood, ignore = self.hypothesis_likelihood(
                    temp_hypothesis)
                # print ("setting temp_hypothesis likelihood to %s" %
                #        new_likelihood)
                eBLH = np.exp(best_likelihood)
                eTHL = np.exp(temp_hypothesis.likelihood)
                improvement_ratio = (eBLH - eTHL)/eTHL
                temp_hypothesis.likelihood = new_likelihood
                print "tHL is %s and eBLH is %s" % (eTHL, eBLH)
                print "improvement ratio is: %s " % improvement_ratio
            else:
                # Didn't find any moves that improve our likelihood, so
                # improvement_ratio is 0
                break
        if not math.isinf(improvement_ratio):
            print ("\n\n\nimprovement ratio is too small to continue, so final "
                   "likelihood is %s" % temp_hypothesis.likelihood)
            print "likelihood achieved with the following nodes:"
            for ev in evidence_dictionary.values():
                print ev
            ### TODO:
            # here we need to rebind the node/hypothesis start and end times to
            # reflect the updated evidence boundaries
            print "\n\n\n"

        if not math.isinf(improvement_ratio):
            # found a new endpoint that improves our likelihood
            for node in temp_hypothesis.nodes:
                # copy the segments with updated boundaries back into the nodes
                evidence_list = node.evvecs
                temp_evidence_list = []
                for ev in evidence_list:
                    label = ev.kStr()
                    try:
                        temp_evidence_list.append(evidence_dictionary[label])
                    except KeyError:
                        # we didn't put this evidence in the dictionary (it's
                        # not bound)
                        temp_evidence_list.append(ev)
                node.evvecs = temp_evidence_list
            temp_hypothesis.update_time_boundaries()
        return temp_hypothesis

    ###
    # ***Stuff below is notes about how a more robust solution to moving
    # endpoints would work (hill-climbing isn't ideal since the effects of the
    # endpoints on the likelihood aren't independent)***
    #
    # ***Implementation Note***
    # Each piece of evidence has two boundaries: a start and end point
    # each boundary can be moved earlier or later or left where it is
    # since the movement of both boundaries may affect the likelihood
    #differently than the combination of both of their effects when moved
    #separately, we need to try all 9 combinations and then explore any that
    #increase the model's joint density

    # The above gives 9 combinations per piece of evidence

    # In addition to density changes from moving both endpoints for a piece of
    # evidence not being predictable based on the change when moving just one,
    # moving the endpoints for multiple pieces of evidence can affect the
    # likelihood in a way that isn't predicted by the effects of moving them
    # individually

    # This suggests that we need to try every combination of endpoint movements
    # across all pieces of evidence, or 9^|e| combinations (9 per piece, as
    # noted above), where |e| is the
    # number of pieces of evidence in this model.  However, we only need to look
    # at combinations across evidence segments which have a common parent node
    # (if they don't share a density function, their endpoint movements affect
    # the likelihood independently of each other).  Note that if nodes A and B
    # share a parent, and B and C share a (different) parent, we need to look at
    # combinations across A, B and C (since the joint density for the model
    # depends on both parent nodes, and one parent node's density is affected by
    # the combination of A's and B's endpoints, while the other's density is
    # affected by the combination of B's and C's endpoints)
    # """
    # dSets = []
    # """A list of lists, with each list indicating a set of evidence objects
    # whose optimal endpoint locations depend on the endpoint locations of the
    # other objects in the list)
    # By maintaining these as separate lists, we can optimize the endpoints for
    # every element in a dependent set jointly, and do this independently for
    # each dependent set, arriving at a locally optimal set of endpoints for
    # every piece of evidence in the model"""
    # evDicts = []
    # """Build dictionary of evidence nodes (organized into lists corresponding
    # to dependent sets of evidence) contained in this hypothesis so we can have
    # a single reference for each piece of evidence, modify the reference's
    # data, and then copy the reference back into the model in place of its
    # previous values"""

    def hypothesis_likelihood(self, hyp):
        """
        Calculate the likelihood of a hypothesis (or the expected likelihood
        if some evidence hasn't been observed), used in evalEv and moveBounds.

        Returns (likelihood, estimated), where estimated is True when the
        value relies on expectations/priors rather than fully observed
        evidence.  likelihood is NaN when there isn't even enough evidence
        to produce an estimate.
        """
        # print "in likelihood function"

        obsNodes = dict()  # which nodes we can calculate
        suffEv = True  # enough evidence has been observed to estimate the
        # hypothesis's likelihood
        allEv = True  # all evidence used by this hypothesis has been observed
        rLH = float("nan")
        rEst = True
        for node in hyp.nodes:
            if node.name == hyp.topNode:
                # The top node's likelihood IS the hypothesis likelihood, so
                # it is never computed per-node here.
                obsNodes[node.name] = False
                continue
            nodeAE = True
            if node.evvecs is not None:
                for ev in node.evvecs:
                    if not ev.bound:
                        if ev.segtype != Evidence.WAKE:
                            allEv = False
                            nodeAE = False
                        # If a required subject/object actor for this node is
                        # itself unbound, we can't even estimate the density.
                        if "subject" in node.reqActs:
                            for sub in node.subjects:
                                if sub.name == ev.actor.name:
                                    if not ev.actor.bound:
                                        suffEv = False
                        if "object" in node.reqActs:
                            for ob in node.objects:
                                if ob.name == ev.actor.name:
                                    if not ev.actor.bound:
                                        suffEv = False

            else:
                nodeAE = True  # this shouldn't ever actually come up, but if a
                # node doesn't predict any evidence then its density function
                # shouldn't require any either
            obsNodes[node.name] = nodeAE
            # Used to decide whether we can use this node's joint density to
            # weight our belief in the nodes for which we're missing some
            # evidence
            if nodeAE:
                ###
                # don't calculate top node's likelihood since that's the
                # hypothesis likelihood, which we're calculating below
                # also don't calculate the likelihood for a node if we haven't
                # observed the required set of evidence
                ###
                node.likelihood, node.prior = node.lfunction(node.evvecs)
        if allEv:
            ###
            # Observed everything, so just take the joint across nodes
            # need to calculate joint of nodes' evidence likelihoods
            # (we have f(E|n) where E is evidence vector and n is latent node
            # for each node, now we need the joint across nodes)
            ###

            if self.independent:
                ###
                # joint is just the sum since nodes are independent
                # (sum since values on log scale)
                ###
                cJoint = 0.
                for node in hyp.nodes:
                    # include = False
                    # for p in node.parents:
                    #     if p.name == self.topNode:
                    #         include = True
                    # if include:
                    #     cumP += node.likelihood
                    if node.name != hyp.topNode:
                        #print "adding node "+node.name+"'s likelihood to joint"
                        cJoint += node.likelihood
                rLH = cJoint
                rEst = False
                # print "calculated score is "+str(rLH)
            else:
                rLH = float("nan")  # Don't have any models with conditionally
                # dependent nodes yet; this might have to be handled in the
                # future (don't think it will be necessary though)
        elif suffEv:  # Don't have enough evidence to calculate the joint
            # likelihood, but we can use priors to estimate missing evidence
            # densities
            # print ("estimating score for subject %s and object %s" %
            #        (node.subjects[0].name, node.objects[0].name))
            estNodes = []
            eCuJoint = 0.
            for node in hyp.nodes:
                if node.name == hyp.topNode:
                    continue
                try:
                    if obsNodes[node.name]:
                        # Observed all its evidence, so we use this node to
                        # weight our priors on the others
                        eCuJoint += node.likelihood  # Previously we were
                        # upweighting node expectations based on other
                        # evidence's strength of belief over its prior -- don't
                        # think this is correct, since expectations are
                        # conditioned on the top node being true anyway
                    else:
                        estNodes.append(node)
                except KeyError:  # node not being in obs dictionary
                    estNodes.append(node)
            for node in estNodes:
                # Don't think we need to subtract explained evidence from
                # cardinality
                # #prior = node.prior+aggPriorRatio-np.log(node.evCard)
                # Don't think we actually want to include the prior, since the
                # scores we return normally aren't considering priors either

                ###
                # Density functions for any parameters we can calculate given
                # what we've observed
                ###
                eCoDen = node.eDen(node.evvecs)
                node.likelihood = eCoDen  # Conditional expectation for node's
                # joint score function
                eCuJoint += eCoDen
            rLH = eCuJoint
            rEst = True
        else:  # If we haven't observed enough evidence to even apply our
            # priors, just return NaN
            rLH = float("nan")
            rEst = True
        return rLH, rEst

    def generate_hypothesis_element(self, tnode, likelihood):
        """
        Build the <actors> XML element for a hypothesis' top node.

        Returns a one-element list containing an "actors" Element whose
        attributes are the comma-joined names of the node's subjects,
        objects and indirect objects.  (likelihood is currently unused.)
        """
        attributes = {
            "subjects": ",".join(actor.name for actor in tnode.subjects),
            "objects": ",".join(actor.name for actor in tnode.objects),
            "ind_objects": ",".join(actor.name
                                    for actor in tnode.ind_objects),
        }
        return [elet.Element("actors", attributes)]

    def generate_hypothesis_xml(self, hyp):
        """
        Build the <activity> XML element describing a single hypothesis:
        its score, its (possibly ongoing) time span, the actor elements of
        its top node and one <sub-activity> element per remaining node.
        """
        hyp.ongoing = False
        for node in hyp.nodes:
            if node.name == self.topNode:
                tnode = node
            if node.atEnd:
                # A node whose final expected evidence is still unbound (but
                # will bind later) marks both the node and the hypothesis as
                # ongoing.
                for ev in node.evvecs:
                    if ev.endsNode and not ev.bound and ev.willBind:
                        node.ongoing = True
                        hyp.ongoing = True
        timestring = ",".join([str(hyp.starttime), str(hyp.endtime)])
        if hyp.ongoing:
            timestring += "+"
        xml = elet.Element("activity", {"id": str(tnode.i),
                                        "name": hyp.name,
                                        "time": timestring})
        if not np.isnan(hyp.likelihood):
            score_attrs = {
                "type": "expectation" if hyp.estimated else "observed",
                "value": str(hyp.likelihood),
            }
            xml.append(elet.Element("score", score_attrs))
        # Re-time a copy of the top node to span the whole hypothesis before
        # emitting its actor elements.
        tempnode = copy.deepcopy(tnode)
        tempnode.starttime = hyp.starttime
        tempnode.endtime = hyp.endtime
        for elt in self.generate_actor_elements(tempnode, False):
            # generate_actor_elements may yield nested lists; keep only the
            # first element of any list entry.
            xml.append(elt[0] if isinstance(elt, list) else elt)
        for node in hyp.nodes:
            if node != tnode:
                xml.append(self.generate_node_element(node))
        return xml

    def generate_actor_element(self, act, role, nst, net, subactivity, node,
                               dummy=False):
        """
        Build the XML element(s) describing one actor's participation in a
        node.

        act -- the actor; role -- "subject"/"object"/"ind_object";
        nst/net -- the node's start and end times; subactivity -- use the
        "subactor" tag instead of "actor"; node -- the node the actor belongs
        to (its atEnd flag decides whether an ongoing actor's time span gets
        a trailing "+"); dummy -- when True, return a single bare Element
        (used for the null filler covering the span before a late-starting
        actor).

        Returns a single Element when dummy is True, otherwise a list of
        Elements (a filler element plus the actor element when the actor
        starts after the node does).
        """
        ast = act.starttime
        aet = act.endtime
        if aet == -1:
            # Null/unbound actor: cover the whole node span.
            ast = nst
            aet = net
            st = nst
            ent = aet
        else:
            st = ast if (ast > nst) else nst
            ent = net
        stag = "subactor" if subactivity else "actor"
        sadict = dict()
        sadict["name"] = act.name
        sadict["observed"] = str(act.bound)
        sadict["type"] = act.atype
        sadict["role"] = role
        timestring = str(st)+","+str(ent)
        if act.ongoing and node.atEnd:
            timestring += "+"
        sadict["time"] = timestring
        if dummy:
            return elet.Element(stag, sadict)
        eltlist = []
        if ast > nst:
            # Actor appears after the node starts; emit a null filler element
            # covering the uncovered prefix [nst, ast-1].
            # BUG FIX: the recursive call previously omitted the ``node``
            # argument, so True bound to ``node`` and ``dummy`` stayed False.
            tAct = Actor(True)
            telt = self.generate_actor_element(tAct, role, nst, ast-1,
                                               subactivity, node, True)
            eltlist.append(telt)
        eltlist.append(elet.Element(stag, sadict))
        return eltlist

    def generate_actor_elements(self, node, subactivity):
        """
        Build actor/subactor XML elements for all of a node's subjects,
        objects and indirect objects.

        Unbound actors are replaced by a null Actor placeholder (keeping the
        ongoing flag) so the output still covers the node's time span.
        Returns a flat list of Elements.
        """
        nst = node.starttime
        net = node.endtime
        elts = []
        for role, actors in (("subject", node.subjects),
                             ("object", node.objects),
                             ("ind_object", node.ind_objects)):
            for act in actors:
                if not act.bound:
                    # Stand in a null actor for unobserved participants.
                    tact = Actor(True)
                    tact.ongoing = act.ongoing
                    act = tact
                # BUG FIX: ind_object elements were previously discarded --
                # the original ind_objects loop never appended its results.
                elts += self.generate_actor_element(act, role, nst, net,
                                                    subactivity, node)
        # generate_actor_element may return nested lists for actors with a
        # leading filler element; keep only the first entry of any list.
        relts = []
        for elt in elts:
            if isinstance(elt, list):
                relts.append(elt[0])
            else:
                relts.append(elt)
        return relts

    def generate_node_element(self, node):
        """
        Build the <sub-activity> XML element for a non-top node: its time
        span (suffixed with "+" when still ongoing), parent ids, optional
        score and nested (sub)actor elements.
        """
        timestring = ",".join([str(node.starttime), str(node.endtime)])
        if node.ongoing and node.atEnd:
            timestring += "+"
        attributes = {
            "name": node.name,
            "time": timestring,
            "parents": ",".join([str(p.i) for p in node.parents]),
        }
        if node.likelihood is not None:
            attributes["score"] = str(node.likelihood)
        elt = elet.Element("sub-activity", attributes)
        for child in self.generate_actor_elements(node, True):
            elt.append(child)
        return elt

    def generate_xml(self):
        """
        Generate one <activity> XML element per tracked hypothesis.

        Returns the list of elements, in hypothesis order.
        """
        xmlList = []
        for hyp in self.hypotheses:
            # generating hypothesis xml (this line was previously a bare
            # string literal, i.e. a silent no-op, not a log statement)
            xmlList.append(self.generate_hypothesis_xml(hyp))
        return xmlList

    def print_hypothesis_xml(self, hyp):
        """
        Pretty-print the XML tree for a single hypothesis to stdout.
        """
        def indent(elem, level=0):
            # Standard ElementTree pretty-printing recipe: rewrites text/tail
            # whitespace in place.  Note the loop variable deliberately
            # shadows ``elem`` so the post-loop fixup applies to the last
            # child element.
            i = "\n" + level*"  "
            if len(elem):
                if not elem.text or not elem.text.strip():
                    elem.text = i + "  "
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
                for elem in elem:
                    indent(elem, level+1)
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
            else:
                if level and (not elem.tail or not elem.tail.strip()):
                    elem.tail = i
        xml = self.generate_hypothesis_xml(hyp)
        tree = elet.ElementTree(xml)
        root = tree.getroot()
        indent(root)
        tree.write(sys.stdout)

    def print_xml(self, outf=None, mfiles=False):
        """
        Write the XML for all current hypotheses.

        outf -- None (write to stdout), a filename string, or an already-open
        file object.
        mfiles -- when True, outf is treated as a filename prefix and each
        hypothesis is written to its own numbered file ("<prefix>NNN.xml")
        wrapped in an <activities> element; otherwise all hypotheses go into
        a single <activities> tree on one stream.

        Raises IOError for unusable output arguments or handles.
        """
        opened = False
        if outf is None:
            outf = sys.stdout
        elif isinstance(outf, str) and not mfiles:
            opened = True
            outf = open(outf, "w")
            #outf.write("<activities>\n")
        elif isinstance(outf, str) and mfiles:
            bname = outf
        elif not isinstance(outf, file):
            # NOTE(review): ``file`` is the Python 2 built-in type; this check
            # (like the print statements above) pins the module to Python 2.
            raise IOError("Unhandled argument type passed as output file")

        def indent(elem, level=0):
            # Standard ElementTree pretty-printing recipe (in-place); the loop
            # variable intentionally shadows ``elem``.
            i = "\n" + level*"  "
            #print "indenting "+str(elem)
            if len(elem):
                if not elem.text or not elem.text.strip():
                    elem.text = i + "  "
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
                for elem in elem:
                    indent(elem, level+1)
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
            else:
                if level and (not elem.tail or not elem.tail.strip()):
                    elem.tail = i

        xmlList = self.generate_xml()
        if mfiles:
            # One output file per hypothesis, each with its own wrapper.
            for i, xml in enumerate(xmlList):
                wrapper_elt = elet.Element("activities", dict())
                if i > 0:
                    outf.close()
                fstring = bname+"{:03d}".format(i)+".xml"
                outf = open(fstring, "w")
                wrapper_elt.append(xml)
                tree = elet.ElementTree(wrapper_elt)
                root = tree.getroot()
                indent(root)
                try:
                    tree.write(outf)
                except IOError as e:
                    # e.message is Python 2-only
                    raise IOError(("Invalid file handle passed as output file:"
                                   "%s" % e.message))
        else:
            # Single output stream: all hypotheses under one wrapper element.
            wrapper_elt = elet.Element("activities", dict())
            for i, xml in enumerate(xmlList):
                wrapper_elt.append(xml)
            try:
                tree = elet.ElementTree(wrapper_elt)
                root = tree.getroot()
                indent(root)
                tree.write(outf)
            except IOError as e:
                raise IOError("Invalid file handle passed as output file: %s" %
                              e.message)
        if opened:
            #outf.write("</activities>")
            outf.close()
        # if (not opened) and isinstance(outf,file):
        #     outf.write("</activities>")


class hypothesis(object):
    """
    a hypothesis is an instance of a model in which some (or all) of the
    evidence has been bound to events we've observed the node structure is the
    same as the model from which the hypothesis is generated
    """
    def __init__(self):
        self.nodes = []
        self.topNode = None
        # NOTE(review): created as a list here, but queryTime() indexes it
        # by name -- presumably rebound to a dict elsewhere; verify callers.
        self.timeNodes = []
        self.likelihood = float("nan")
        self.name = ""
        self.starttime = -1
        self.endtime = -1
        # True while the likelihood is only an estimate, i.e. not yet based
        # on all of the evidence we expect to see.
        self.estimated = True
        self.subjects = []
        self.objects = []
        self.ind_objects = []

    def update_time_boundaries(self):
        """
        Adjusts start and end times for nodes in this hypothesis and the
        hypothesis itself such that the end time for a node is the maximum of
        its evidence's end times, and its start time is the minimum of its
        evidence's start times
        """
        max_hypothesis_time = float("-inf")
        min_hypothesis_time = float("inf")
        for n in self.nodes:
            max_node_time = float("-inf")
            min_node_time = float("inf")
            for e in n.evvecs:
                # Only bound evidence carries meaningful time stamps.
                if e.bound:
                    if e.endtime > max_node_time:
                        max_node_time = e.endtime
                        if max_node_time > max_hypothesis_time:
                            max_hypothesis_time = max_node_time
                    if e.starttime < min_node_time:
                        min_node_time = e.starttime
                        if min_node_time < min_hypothesis_time:
                            min_hypothesis_time = min_node_time
            # NOTE(review): a node with no bound evidence is left with
            # inf/-inf boundaries; presumably such nodes are never queried.
            n.starttime = min_node_time
            n.endtime = max_node_time
        self.starttime = min_hypothesis_time
        self.endtime = max_hypothesis_time

    def __str__(self):
        tnode = None
        # No break on purpose: if several nodes share the top node's name,
        # the last one wins (preserves historical behavior).
        for node in self.nodes:
            if node.name == self.topNode:
                tnode = node
        if tnode is not None:
            slist = [x.name for x in
                     sorted(tnode.subjects, key=operator.attrgetter("name"))]
            olist = [x.name for x in
                     sorted(tnode.objects, key=operator.attrgetter("name"))]
            rstring = ("%s([%s],[%s],[%s,%s])" %
                       (self.name, self.starttime, self.endtime,
                        ",".join(slist), ",".join(olist)))
        else:
            # shouldn't ever get here
            rstring = "%s --created without designating top node--" % self.name
        return rstring

    def queryTime(self, name, ctime=None):
        """
        Look up the TimeNode called `name` and evaluate its query, at the
        current time or at `ctime` when one is given.

        Raises KeyError when no TimeNode of that name exists.
        """
        if name not in self.timeNodes:
            raise KeyError("Specified TimeNode not found")
        tnode = self.timeNodes[name]
        if ctime is None:
            return tnode.query()
        return tnode.query(ctime)


class Distributions:
    eps = 1.*10.**-10

    class PDFs:
        """
        Distance integrals are taken from edges of intervals if the function is
        not defined for the edges this class is just to aggregate distribution
        density functions which are likely to be used in multiple models
        """
        ### NOTE: ALL PDFS RETURN LOG SCALE VALUES ###
        @staticmethod
        def beta(x, al, be):
            """
            Log-density at x of the beta distribution with alpha = al,
            beta = be.  Returns -inf when x is within eps of 0 or 1,
            where the log terms diverge.
            """
            # float() replaces np.float, which modern NumPy removed.
            x = float(x)
            al = float(al)
            be = float(be)
            #return x**(al-1.)*(1.-x)**(be-1.)/sp.beta(al,be)
            if ((x < Distributions.eps) or (np.absolute(1.-x)
                                            < Distributions.eps)):
                return float("-inf")
            return ((al-1.)*np.log(x) + (be-1.)*np.log(1.-x) -
                    np.log(sp.beta(al, be)))

        @staticmethod
        def ebeta(x, al, be):
            """
            default beta function returns log value, which we don't want for
            integration
            """
            x = np.float(x)
            al = np.float(al)
            be = np.float(be)
            return x**(al-1.) * (1.-x)**(be-1.) / sp.beta(al, be)

        @staticmethod
        def egamma(x, al, be):
            """
            Gamma density at x (linear scale, not log) with shape al and
            rate be -- used where the log value from gamma() is unwanted,
            e.g. numerical integration.  Returns 0 density below eps.
            """
            if x < Distributions.eps:
                return 0
            # float() replaces np.float, which modern NumPy removed.
            x = float(x)
            al = float(al)
            be = float(be)
            return be**al/sp.gamma(al)*x**(al-1.)*np.exp(-be*x)

        @staticmethod
        def gamma(x, al, be):
            """
            gamma distribution with alpha = al, beta = be log-density at x
            using the parameterization where E[X] = alpha/beta.  Returns
            -inf for x below eps, where log(x) diverges.
            """
            # float() replaces np.float, which modern NumPy removed.
            x = float(x)
            al = float(al)
            be = float(be)
            #return be**al/sp.gamma(al)*x**(al-1.)*np.exp(-1.*be*x)
            if x < Distributions.eps:
                return float("-inf")
            return al*np.log(be)-np.log(sp.gamma(al))+(al-1.)*np.log(x)-be*x

        @staticmethod
        def expon(x, la):
            la = np.float(la)
            x = np.float(x)
            """exponential distribution with lambda = la at x"""
            return np.log(la) + -la*x

        @staticmethod
        def arraypdf(x, p1, p2, pdff):
            return [pdff(xi, p1, p2) for xi in x]

        @staticmethod
        def nakagami(x, mu, om):
            x = np.float(x)
            mu = np.float(mu)
            om = np.float(om)
            return sps.nakagami.logpdf(x, mu, scale=om)

        @staticmethod
        def foldednorm(x, c, s):
            x = np.float(x)
            c = np.float(c)
            s = np.float(s)
            return sps.foldnorm.logpdf(x, c, scale=s)

    class Surv:
        """survival functions"""

        def __init__():
            pass

        @staticmethod
        def gamma(x, al, be):
            s = np.float(1./be)
            al = np.float(al)
            x = np.float(x)
            #print "logsf: "+str(sps.gamma.logsf(x,al,scale=s))+"\n"
            return sps.gamma.logsf(x, al, scale=s)
