# Copyright 2012 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import numpy
import numpy.linalg
import numpy.random

from exemplars import MatrixFS

import scipy.weave as weave
from utils.start_cpp import start_cpp



class Goal:
  """Interface that defines the purpose of a decision forest - defines what the tree is optimising, what statistics to store at each node and what is returned to the user as the answer when they provide a novel feature to the forest (i.e. how to combine the statistics)."""
  
  def clone(self):
    """Returns a deep copy of this object."""
    raise NotImplementedError
  
  
  def stats(self, es, index, weights = None):
    """Generates a statistics entity for a node, based on the features that make it to the node. The statistics entity is decided by the task at hand, but must allow the nodes entropy to be calculated, plus a collection of these is used to generate the answer when a feature is given to the decision forest. es is a feature set, index the indices of the features in es that have made it to this node. weights is an optional set of weights for the features, weighting how many features they are worth - will be a 1D numpy.float32 array aligned with the feature set, and can contain fractional weights."""
    raise NotImplementedError
  
  def updateStats(self, stats, es, index, weights = None):
    """Given a stats entity, as generated by the stats method, this returns a copy of that stats entity that has had additional exemplars factored in, specifically those passed in. This allows a tree to be updated with further training examples (Or, at least its stats to be updated - its structure is set in stone once built.) Needed for incremental learning."""
    raise NotImplementedError

  def entropy(self, stats):
    """Given a statistics entity this returns the associated entropy - this is used to choose which test is best."""
    raise NotImplementedError
  
  
  def postTreeGrow(self, root, gen):
    """After a tree is initially grown (At which point its shape is locked, but incremental learning could still be applied.) this method is given the root node of the tree, and can do anything it likes to it - a post processing step, in case the stats objects need some extra cleverness. Most Goal-s do not need to implement this. Also provided the generator for the tests in the tree."""
    pass
  

  def answer_types(self):
    """When classifying a new feature an answer is to be provided, of which several possibilities exist. This returns a dictionary of those possibilities (key==name, value=human readable description of what it is.), from which the user can select. By convention 'best' must always exist, as the best guess that the algorithm can give (A point estimate of the answer the user is after.). If a probability distribution over 'best' can be provided then that should be available as 'prob' (It is highly recommended that this be provided.)."""
    return {'best':'Point estimate of the best guess at an answer, in the same form as provided to the trainning stage.'}
  
  def answer(self, stats_list, which, es, index, trees):
    """Given a feature then using a forest a list of statistics entities can be obtained from the leaf nodes that the feature ends up in, one for each tree (Could be as low as just one entity.). This converts that statistics entity list into an answer, to be passed to the user, possibly using the es with the index of the one entry that the stats list is for as well. As multiple answer types exist (As provided by the answer_types method.) you provide the one(s) you want to the which variable - if which is a string then that answer type is returned, if it is a list of strings then a tuple aligned with it is returned, containing multiple answers. If multiple types are needed then returning a list should hopefully be optimised by this method to avoid duplicate calculation. Also requires the trees themselves, as a list aligned with stats_list."""
    raise NotImplementedError
  
  def answer_batch(self, stats_lists, which, es, indices, trees):
    """A batch version of answer, that does multiple stat lists at once. stats_lists consists of a list of lists, where the outer list matches the entries in indices (A numpy array), and the inner list are the samples, aligned with the trees list. es is the exemplar object that matches up with indices, and which gives the output(s) to provide. Return value is a list, matching indices, that contains the answer for each, which can be a tuple if which is a list/tuple. A default implementation is provided."""
    # List comprehension rather than map with a tuple-parameter lambda - tuple
    # parameter unpacking is Python 2 only (removed by PEP 3113), and the
    # comprehension also guarantees a real list is returned.
    return [self.answer(stats_list, which, es, indices[i], trees) for i, stats_list in enumerate(stats_lists)]
  
  
  def summary(self, es, index, weights = None):
    """Once a tree has been grown a testing set (The 'out-of-bag' set) is typically run through to find out how good it is. This consists of two steps, the first of which is to generate a summary of the oob set that made it to each leaf. This generates the summary, and must be done such that the next step - the use of a stats and summary entity to infer an error metric with a weight for averaging the error metrics from all leafs, can be performed. For incremental learning it is also required to be able to add new exemplars at a later time."""
    raise NotImplementedError
  
  def updateSummary(self, summary, es, index, weights = None):
    """For incremental learning the summaries need to be updated with further testing examples - this does that. Given a summary and some exemplars it returns a copy of the summary updated with the new exemplars."""
    raise NotImplementedError
  
  def error(self, stats, summary):
    """Given a stats entity and a summary entity (i.e. the details of the testing and training sets that have reached a leaf) this returns the error of the testing set versus the model learnt from the training set. The actual return is a pair - (error, weight), so that the errors from all the leafs can be combined in a weighted average. The error metric is arbitrary, but the probability of 'being wrong' is a good choice. An alternate mode exists, where weight is set to None - in this case no averaging occurs and the results from all nodes are just summed together."""
    raise NotImplementedError
    
    
  def codeC(self, name, escl):
    """Returns a dictionary of strings containing C code, that implement the Goal's methods in C - name is a prefix on the names used, escl the result of listCodeC on the exemplar set from which it will get its data. The contents of its return value must contain some of: `{'stats': 'void <name>_stats(PyObject * data, Exemplar * index, void *& out, size_t & outLen)' - data is the list of channels for the exemplar object, index the exemplars to use. The stats object is stuck into out, and the size updated accordingly. If the provided out object is too small it will be free-ed and then a large enough buffer malloc-ed; null is handled correctly if outLen is 0., 'updateStats': 'void <name>_updateStats(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)' - Same as stats, except the inout data arrives already containing a stats object, which is to be updated with the provided exemplars., 'entropy':'float <name>_entropy(void * stats, size_t statsLen) - Given a stats object returns its entropy.', 'summary': 'void <name>_summary(PyObject * data, Exemplar * index, void *& out, size_t & outLen)' - Basically the same as stats, except this time it is using the exemplars to calculate a summary. Interface works in the same way., 'updateSummary': 'void <name>_updateSummary(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)' - Given a summary object, using the inout variables it updates it with the provided exemplars., 'error': 'void <name>_error(void * stats, size_t statsLen, void * summary, size_t summaryLen, float & error, float & weight)' - Given two buffers, representing the stats and the summary, this calculates the error, which is put into the reference error. This should be done incrementally, such that errors from all nodes in a tree can be merged - error will be initialised at 0, and additionally weight is provided which can be used as it wishes (Incremental mean is typical.), also initialised as 0.}`. 
Optional - if it throws the NotImplementedError (The default) everything will be done in python, if some C code is dependent on a missing C method it will also be done in python. The code can be dependent on the associated exemplar code where applicable."""
    raise NotImplementedError
  
  def key(self):
    """Provides a unique string that can be used to hash the results of codeC, to avoid repeated generation. Must be implemented if codeC is implemented."""
    raise NotImplementedError



class Classification(Goal):
  """The standard goal of a decision forest - classification. When training expects the existence of a discrete channel containing a single feature for each exemplar, the index of which is provided. Each discrete feature indicates a different training class, and they should be densely packed, starting from 0 inclusive, i.e. belonging to the set {0, ..., # of classes-1}. Number of classes is typically provided, though None can be provided instead in which case it will automatically resize data structures as needed to make them larger as more classes (Still densely packed.) are seen. A side effect of this mode is when it returns arrays indexed by class the size will be data driven, and from the view of the user effectively arbitrary - user code will have to handle this."""
  def __init__(self, classCount, channel):
    """You provide firstly how many classes exist (Or None if unknown.), and secondly the index of the channel that contains the ground truth for the exemplars. This channel must contain a single integer value, ranging from 0 inclusive to the number of classes, exclusive."""
    self.classCount = classCount
    self.channel = channel
  
  def clone(self):
    """Returns a new Classification with the same class count and channel."""
    return Classification(self.classCount, self.channel)
  
  
  def stats(self, es, index, weights = None):
    """Returns a byte string containing a float32 histogram of (optionally weighted) class counts for the given exemplars."""
    if len(index)!=0:
      # 'is not None' rather than '!=None' - comparing a numpy array with !=
      # produces an elementwise result, which is ambiguous in a conditional.
      ret = numpy.bincount(es[self.channel, index, 0], weights=weights[index] if weights is not None else None)
      ret = numpy.asarray(ret, dtype=numpy.float32)
    else:
      ret = numpy.zeros(self.classCount if self.classCount is not None else 1, dtype=numpy.float32)
    
    if self.classCount is not None and ret.shape[0]<self.classCount: ret = numpy.concatenate((ret, numpy.zeros(self.classCount-ret.shape[0], dtype=numpy.float32))) # When numpy 1.6.0 becomes common this line can be flipped to a minlength term in the bincount call.
    
    return ret.tostring()
  
  def updateStats(self, stats, es, index, weights = None):
    """Returns a copy of the given stats byte string with the histogram counts of the provided exemplars added in, growing the histogram if a previously unseen class appears."""
    ret = numpy.fromstring(stats, dtype=numpy.float32)
    toAdd = numpy.bincount(es[self.channel, index, 0], weights=weights[index] if weights is not None else None)
    
    if ret.shape[0]<toAdd.shape[0]:
      ret = numpy.append(ret, numpy.zeros(toAdd.shape[0]-ret.shape[0], dtype=numpy.float32))
    
    ret[:toAdd.shape[0]] += toAdd
    
    return ret.tostring()

  def entropy(self, stats):
    """Returns the Shannon entropy (in nats) of the class histogram stored in the given stats byte string."""
    dist = numpy.fromstring(stats, dtype=numpy.float32)
    dist = dist[dist>1e-6] / dist.sum()
    return -(dist*numpy.log(dist)).sum() # At the time of coding scipy.stats.distributions.entropy is broken-ish <rolls eyes> (Gives right answer at the expense of filling your screen with warnings about zeros.).


  def answer_types(self):
    """See Goal.answer_types - supports best/prob/prob_samples plus generative variants."""
    return {'best':'An integer indexing the class this feature is most likelly to belong to given the model.',
            'prob':'A categorical distribution over class membership, represented as a numpy array of float32 type. Gives the probability of it belonging to each class, P(class|data).',
            'prob_samples':'The prob result is obtained by averaging a set of probability distributions, one from each tree - this outputs that list of distributions instead, so its varaibility can be accessed.',
            'gen':'The default probability returned by the system is discriminative - this instead returns a generative result, P(data|class). A numpy array of float32 type containing the data probability for each class - will not sum to 1.',
            'gen_list':'Is to gen as prob_samples is to prob. Gives a list of probabilities representing P(class|data), so the consistancy can be accessed.'}
  
  def answer(self, stats_list, which, es, index, trees):
    """See Goal.answer - converts the per-tree class histograms into the requested answer type(s)."""
    # Convert to a list, and process like that, before correcting for the return - simpler...
    single = isinstance(which, str)
    if single: which = [which]
    
    # Calculate the probability distribution over class membership, both discriminatively and generatively...
    needGen = ('gen' in which) or ('gen_list' in which)
    
    prob_list = []
    if needGen: gen_list = []
    
    cCount = self.classCount if self.classCount is not None else 1
    prob = numpy.zeros(cCount, dtype=numpy.float32)
    if needGen: gen = numpy.zeros(cCount, dtype=numpy.float32)
    
    for stats, tree in zip(stats_list, trees):
      cat = numpy.fromstring(stats, dtype=numpy.float32)
      
      dist = cat / cat.sum()
      prob_list.append(dist)
      
      if dist.shape[0]>prob.shape[0]:
        prob = numpy.append(prob, numpy.zeros(dist.shape[0]-prob.shape[0], dtype=numpy.float32))
      prob[:dist.shape[0]] += dist
      
      if needGen:
        # Generative term - leaf count over root count, guarding against
        # classes the tree never saw (div==0).
        div = numpy.fromstring(tree.stats, dtype=numpy.float32)
        use = numpy.where(div[:cat.shape[0]]>0.0)
        g = numpy.zeros(cat.shape[0], dtype=numpy.float32)
        g[use] = cat[use] / div[use]
        gen_list.append(g)
        
        if g.shape[0]>gen.shape[0]:
          gen = numpy.append(gen, numpy.zeros(g.shape[0]-gen.shape[0], dtype=numpy.float32))
        gen[:g.shape[0]] += g
    
    prob /= prob.sum()
    if needGen: gen /= len(gen_list)
    
    # Prepare the return...
    def make_answer(t):
      if t=='prob': return prob
      elif t=='best': return prob.argmax()
      elif t=='prob_samples': return prob_list
      elif t=='gen': return gen
      elif t=='gen_list': return gen_list
    
    # List comprehension rather than map - under Python 3 map returns an
    # iterator, which would break the subscripting below.
    ret = [make_answer(t) for t in which]
    
    # Make sure the correct thing is returned...
    if single: return ret[0]
    else: return tuple(ret)
  
  def answer_batch(self, stats_lists, which, es, indices, trees):
    """Batch version of answer - see Goal.answer_batch. Uses a scipy.weave C implementation when available, otherwise falls back to calling answer per exemplar."""
    # As this version might be dealing with lots of data we include a scipy.weave based optimisation...
    if weave is not None:
      code = start_cpp() + """
      // Find out what we need to calculate...
       bool doProbList = false;
       bool doGen = false;
       bool doGenList = false;
       
       int wLength = PyList_Size(which);
       int * wCodes = (int*)malloc(sizeof(int) * wLength);
       for (int i=0; i<wLength; i++)
       {
        char * s = PyString_AsString(PyList_GetItem(which, i));
        wCodes[i] = 0; // prob
        if (strcmp(s,"best")==0) wCodes[i] = 1;
        if (strcmp(s,"prob_samples")==0) {doProbList = true; wCodes[i] = 2;}
        if (strcmp(s,"gen")==0) {doGen = true; wCodes[i] = 3;}
        if (strcmp(s,"gen_list")==0) {doGenList = true; wCodes[i] = 4;}
       }
       
      // Buffers that are needed...
       float * probBuf = 0;
       float * genBuf = 0;
      
      // Prep the return value...
       int item_count = PyList_Size(stats_lists);
       PyObject * ret = PyList_New(item_count);
       
      // Loop through and do each exemplar in turn, adding its result to the return list...       
       for (int i=0; i<item_count; i++)
       {
        // Get the list of stats objects...
         PyObject * stats = PyList_GetItem(stats_lists, i);
         int statCount = PyList_Size(stats);
       
        // Iterate the list and calculate the size of the largest element...
         npy_intp vecLength = 0;
         for (int j=0; j<statCount; j++)
         {
          PyObject * s = PyList_GetItem(stats, j);
          int len = PyString_Size(s) / sizeof(float);
          if (len>vecLength) vecLength = len;
         }
        
        // Resize the buffers accordingly, zero them...
         probBuf = (float*)realloc(probBuf, sizeof(float)*vecLength);
         for (int j=0; j<vecLength; j++) probBuf[j] = 0.0;
         
         if (doGen)
         {
          genBuf = (float*)realloc(genBuf, sizeof(float)*vecLength);
          for (int j=0; j<vecLength; j++) genBuf[j] = 0.0;
         }
        
        // Iterate the list and generate the various outputs we need (There are potentially 4 of them.)...
         PyObject * probList = 0;
         PyObject * genList = 0;
         if (doProbList) probList = PyList_New(statCount);
         if (doGenList) genList = PyList_New(statCount);
         
         for (int j=0; j<statCount; j++)
         {
          PyObject * s = PyList_GetItem(stats, j);
          int len = PyString_Size(s) / sizeof(float);
          float * dist = (float*)(void*)PyString_AsString(s);
          
          float sum = 0.0;
          for (int k=0; k<len; k++) sum += dist[k];
          if (sum<1e-6) sum = 1e-6; // For safety against divide by zero.
          
          for (int k=0; k<len; k++) probBuf[k] += dist[k] / sum;
          if (doProbList)
          {
           PyObject * arr = PyArray_ZEROS(1, &vecLength, NPY_FLOAT, 0);
           for (int k=0; k<len; k++) *(float*)PyArray_GETPTR1(arr, k) = dist[k] / sum;
           PyList_SetItem(probList, j, arr);
          }
          
          if ((doGen)||(doGenList))
          {
           PyObject * t = PyList_GetItem(root_stats, j);
           float * div = (float*)(void*)PyString_AsString(t);
           
           // Guard div[k]==0, to match the python implimentation (numpy.where(div>0.0))...
           if (doGen)
           {
            for (int k=0; k<len; k++) {if (div[k]>0.0) genBuf[k] += dist[k] / div[k];}
           }
           if (doGenList)
           {
            PyObject * arr = PyArray_ZEROS(1, &vecLength, NPY_FLOAT, 0);
            for (int k=0; k<len; k++) {if (div[k]>0.0) *(float*)PyArray_GETPTR1(arr, k) = dist[k] / div[k];}
            PyList_SetItem(genList, j, arr);
           }
          }
         }
         
        // Normalise the buffers...
         {
          float sum = 0.0;
          for (int j=0; j<vecLength; j++) sum += probBuf[j];
          for (int j=0; j<vecLength; j++) probBuf[j] /= sum;
         }
         
         if (doGen)
         {
          for (int j=0; j<vecLength; j++) genBuf[j] /= statCount;
         }
        
        // Iterate the proxy for which, and store the required items in the correct positions...
         PyObject * ans = PyTuple_New(wLength);
         
         for (int j=0; j<wLength; j++)
         {
          PyObject * obj = 0;
          switch(wCodes[j])
          {
           case 0: // prob
           {
            obj = PyArray_EMPTY(1, &vecLength, NPY_FLOAT, 0);
            for (int k=0; k<vecLength; k++) *(float*)PyArray_GETPTR1(obj, k) = probBuf[k];
           }
           break;
           case 1: // best
           {
            int best = 0;
            for (int k=1; k<vecLength; k++)
            {
             if (probBuf[k]>probBuf[best]) best = k;
            }
            obj = PyInt_FromLong(best);
           }
           break;
           case 2: // prob_samples
           {
            obj = probList;
            Py_INCREF(obj);
           }
           break;
           case 3: // gen
           {
            obj = PyArray_EMPTY(1, &vecLength, NPY_FLOAT, 0);
            for (int k=0; k<vecLength; k++) *(float*)PyArray_GETPTR1(obj, k) = genBuf[k];
           }
           break;
           case 4: // gen_list
           {
            obj = genList;
            Py_INCREF(obj);
           }
           break;
          }
         
          PyTuple_SetItem(ans, j, obj);
         }
       
        // Store the answer tuple for this exemplar...
         PyList_SetItem(ret, i, ans);
         
        // Some cleaning up...
         Py_XDECREF(genList);
         Py_XDECREF(probList);
       }
      
      // Clean up...
       free(probBuf);
       free(genBuf);
       free(wCodes);
      
      // Return the list of results...
       return_val = ret;
       Py_XDECREF(ret);
      """
      
      # A real list is needed - weave indexes it with PyList_GetItem.
      root_stats = [t.stats for t in trees]
      
      single = isinstance(which, str)
      if single: which = [which]
      ret = weave.inline(code, ['stats_lists', 'which', 'root_stats'])
      if single: ret = [r[0] for r in ret]
      return ret
      
    else:
      # List comprehension rather than a tuple-parameter lambda, which is Python 2 only (PEP 3113).
      return [self.answer(stats_list, which, es, indices[i], trees) for i, stats_list in enumerate(stats_lists)]
    


  def summary(self, es, index, weights = None):
    """Returns a byte string containing a float32 histogram of the testing (oob) exemplars' classes, optionally weighted."""
    ret = numpy.bincount(es[self.channel, index, 0], weights=weights[index] if weights is not None else None)
    ret = numpy.asarray(ret, dtype=numpy.float32)
    
    if self.classCount is not None and ret.shape[0]<self.classCount: ret = numpy.append(ret, numpy.zeros(self.classCount-ret.shape[0], dtype=numpy.float32)) # When numpy 1.6.0 becomes common this line can be flipped to a minlength term in the bincount call.
    
    return ret.tostring()
  
  def updateSummary(self, summary, es, index, weights = None):
    """Returns a copy of the given summary byte string with the histogram counts of the provided exemplars added in, growing the histogram if needed."""
    ret = numpy.fromstring(summary, dtype=numpy.float32)
    toAdd = numpy.bincount(es[self.channel, index,0], weights=weights[index] if weights is not None else None)
    
    if ret.shape[0]<toAdd.shape[0]:
      ret = numpy.append(ret, numpy.zeros(toAdd.shape[0]-ret.shape[0], dtype=numpy.float32))
    
    ret[:toAdd.shape[0]] += toAdd
    
    return ret.tostring()
  
  def error(self, stats, summary):
    """Returns (average probability of misclassifying a testing exemplar, number of testing exemplars) for a leaf, given its training stats and testing summary."""
    # Treats the histogram of training samples as a probability distribution from which the answer is drawn - the error is then the average probability of getting each sample in the summary wrong, and the weight the number of exemplars that went into the summary...
    ## Fetch the distribution/counts...
    dist = numpy.fromstring(stats, dtype=numpy.float32)
    dist /= dist.sum()
    test = numpy.fromstring(summary, dtype=numpy.float32)
    count = test.sum()
    
    # No testing exemplars reached this node - avoid a 0/0 giving NaN; zero weight drops it from the average.
    if count==0: return (0.0, 0.0)
    
    if dist.shape[0] < test.shape[0]:
      dist = numpy.append(dist, numpy.zeros(test.shape[0]-dist.shape[0], dtype=numpy.float32))
    
    # Calculate and average the probabilities...
    avgError = ((1.0-dist[:test.shape[0]])*test).sum() / count
    
    return (avgError, count)


  def codeC(self, name, escl):
    """See Goal.codeC - provides C implementations of stats/updateStats/entropy/summary/updateSummary/error for use by the weave accelerated code paths."""
    cStats = start_cpp() + """
    void %(name)s_stats(PyObject * data, Exemplar * index, void *& out, size_t & outLen)
    {
     // Make sure the output it at least as large as classCount, and zero it out...
      if (outLen<(sizeof(float)*%(classCount)i))
      {
       outLen = sizeof(float) * %(classCount)i;
       out = realloc(out, outLen);
      }
      
      for (int i=0; i<(outLen/sizeof(float)); i++)
      {
       ((float*)out)[i] = 0.0;
      }
     
     // Iterate and play weighted histogram, growing out as needed...
      %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
      
      int maxSeen = %(classCount)i;
      while (index)
      {
       int cls = %(channelName)s_get(cData, index->index, 0);
       int cap = cls+1;
       if (cap>maxSeen) maxSeen = cap;
       
       if ((cap*sizeof(float))>outLen)
       {
        int zero_start = outLen / sizeof(float);
        
        outLen = cap*sizeof(float);
        out = realloc(out, outLen);
        
        for (int i=zero_start; i<cap; i++)
        {
         ((float*)out)[i] = 0.0;
        }
       }
       
       ((float*)out)[cls] += index->weight;
       
       index = index->next;
      }
      
     // Correct the output size if needed (It could be too large)...
      outLen = maxSeen * sizeof(float);
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype'], 'classCount':self.classCount if self.classCount is not None else 1}
    
    cUpdateStats = start_cpp() + """
    void %(name)s_updateStats(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)
    {
     // Iterate and play weighted histogram, growing out as needed...
      %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
      
      int maxSeen = inoutLen / sizeof(float);
      while (index)
      {
       int cls = %(channelName)s_get(cData, index->index, 0);
       int cap = cls+1;
       if (cap>maxSeen) maxSeen = cap;
       
       if ((cap*sizeof(float))>inoutLen)
       {
        int zero_start = inoutLen / sizeof(float);
        
        inoutLen = cap*sizeof(float);
        inout = realloc(inout, inoutLen);
        
        for (int i=zero_start; i<cap; i++)
        {
         ((float*)inout)[i] = 0.0;
        }
       }
       
       ((float*)inout)[cls] += index->weight;
       
       index = index->next;
      }
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype']}
    
    cEntropy = start_cpp() + """
    float %(name)s_entropy(void * stats, size_t statsLen)
    {
     float sum = 0.0;
     int length = statsLen>>2;
     for (int i=0; i<length; i++)
     {
      sum += ((float*)stats)[i];
     }
     
     float ret = 0.0;
     for (int i=0; i<length; i++)
     {
      float val = ((float*)stats)[i];
      if (val>1e-6)
      {
       val /= sum;
       ret -= val * log(val);
      }
     }
     
     return ret;
    }
    """%{'name':name}
    
    cSummary = start_cpp() + """
    void %(name)s_summary(PyObject * data, Exemplar * index, void *& out, size_t & outLen)
    {
     // Make sure the output it at least as large as classCount, and zero it out...
      if (outLen<(sizeof(float)*%(classCount)i))
      {
       outLen = sizeof(float) * %(classCount)i;
       out = realloc(out, outLen);
      }
      
      for (int i=0; i<(outLen/sizeof(float)); i++)
      {
       ((float*)out)[i] = 0.0;
      }
     
     // Iterate and play weighted histogram, growing out as needed...
      %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
      
      int maxSeen = %(classCount)i;
      while (index)
      {
       int cls = %(channelName)s_get(cData, index->index, 0);
       int cap = cls+1;
       if (cap>maxSeen) maxSeen = cap;
       
       if ((cap*sizeof(float))>outLen)
       {
        int zero_start = outLen / sizeof(float);
        
        outLen = cap*sizeof(float);
        out = realloc(out, outLen);
        
        for (int i=zero_start; i<cap; i++)
        {
         ((float*)out)[i] = 0.0;
        }
       }
       
       ((float*)out)[cls] += index->weight;
       
       index = index->next;
      }
      
     // Correct the output size if needed (It could be too large)...
      outLen = maxSeen * sizeof(float);
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype'], 'classCount':self.classCount if self.classCount is not None else 1}
    
    cUpdateSummary = start_cpp() + """
    void %(name)s_updateSummary(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)
    {
     // Iterate and play weighted histogram, growing out as needed...
      %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
      
      int maxSeen = inoutLen / sizeof(float);
      while (index)
      {
       int cls = %(channelName)s_get(cData, index->index, 0);
       int cap = cls+1;
       if (cap>maxSeen) maxSeen = cap;
       
       if ((cap*sizeof(float))>inoutLen)
       {
        int zero_start = inoutLen / sizeof(float);
        
        inoutLen = cap*sizeof(float);
        inout = realloc(inout, inoutLen);
        
        for (int i=zero_start; i<cap; i++)
        {
         ((float*)inout)[i] = 0.0;
        }
       }
       
       ((float*)inout)[cls] += index->weight;
       
       index = index->next;
      }
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype']}
    
    cError = start_cpp() + """
    void %(name)s_error(void * stats, size_t statsLen, void * summary, size_t summaryLen, float & error, float & weight)
    {
     // Sum the stuff in stats...
      int statsSize = statsLen / sizeof(float);
      float statsSum = 0.0;
      for (int i=0; i<statsSize; i++) statsSum += ((float*)stats)[i];
      
     // Go through and factor in each class from the summary in turn, using an incrimental mean...
      int summarySize = summaryLen / sizeof(float);
      for (int c=0; c<summarySize; c++)
      {
       float avgErr = (c<statsSize)?(1.0 - ((float*)stats)[c]/statsSum):1.0;
       float w = ((float*)summary)[c];
       
       weight += w;
       if (weight>1e-3)
       {
        error += (avgErr-error) * w/weight;
       }
      }
    }
    """%{'name':name}
    
    return {'stats':cStats, 'updateStats':cUpdateStats, 'entropy':cEntropy, 'summary':cSummary, 'updateSummary':cUpdateSummary, 'error':cError}
  
  def key(self):
    """Unique hash key for the codeC output - depends on the channel and (when known) the class count."""
    return ('Classification|%i'%self.channel) + ('' if self.classCount is None else (':%i'%self.classCount))



class DensityGaussian(Goal):
  """Provides the ability to construct a density estimate, using Gaussian distributions to represent the density at each node in the tree. A rather strange thing to be doing with a decision forest, and I am a little suspicious of it, but it does give usable results, at least for low enough dimensionalities where everything remains sane. Due to its nature it can be very memory consuming if you're doing incremental learning - the summary has to store all the provided samples. Requires a channel to contain all the features that are fed into the density estimate (It is to this that a Gaussian is fitted.), which is always in channel 0. Other features can not exist, so typically input data would only have 1 channel. Because the divisions between nodes are sharp (This is a mixture model only between trees, not between leaf nodes within each tree.) the normalisation constant for each Gaussian has to be adjusted to take this into account. This is achieved by sampling - sending samples from the Gaussian down the tree and counting what percentage make the node. Note that when calculating the Gaussian at each node a prior is used, to avoid degeneracies, with a default weight of 1, so if weights are provided they should be scaled accordingly. Using a decision tree for density estimation is a bit hit and miss based on my experience - you need to pay very close attention to tuning the min train parameter of the pruner, as information gain is a terrible stopping metric in this case. You also need a lot of trees to get something smooth out, which means it is quite computationally expensive."""
  def __init__(self, feats, samples = 1024, prior_weight = 1.0):
    """feats is the number of features to be found in channel 0 of the data, which are used to fit a Gaussian at each node. samples is how many samples per node it sends down the tree, to weight that node according to the samples that can actually reach it. prior_weight is the weight assigned to a prior used on each node to avoid degeneracies - it defaults to 1, with 0 removing it entirely (Not recommended.)."""
    self.feats = feats # Dimensionality of the feature vectors in channel 0.
    self.samples = samples # Samples drawn per node by postTreeGrow to estimate truncation.
    self.prior_weight = prior_weight # Weight of the symmetric prior Gaussian.
    
    self.temp = numpy.empty((2, feats), dtype=numpy.float32) # Scratch buffer used by the weave-accelerated answer_batch (accessed as TEMP2 in the C code).

  def clone(self):
    return DensityGaussian(self.feats, self.samples, self.prior_weight)
  
  
  def stats(self, es, index, weights = None):
    # First calculate the weighted mean of the samples we have...
    data = es[0, index, :].copy()
    
    w = weights[index] if weights!=None else None
    weight = w.sum() if w!=None else float(data.shape[0])
    mean = numpy.asarray(numpy.average(data, axis=0, weights=w), dtype=numpy.float32)
   
    # Offset the data matrix by the mean...
    data -= mean.reshape((1,-1))
    
    # Calculate the size of a symmetric Gaussian, to be used as a prior to avoid degenerate situations...
    sym_var = numpy.square(data).mean()
    if sym_var<1e-3: sym_var = 1e-3 # For safety.
    
    # Now calculate the covariance for a general Gaussian fitted to the data, with a symmetric prior with a weight of one...
    covar = numpy.identity(self.feats, dtype=numpy.float32)
    covar *= sym_var * self.prior_weight
    
    if weights!=None:
      covar += numpy.dot(data.T, data * w.reshape((-1,1)))
      pw = self.prior_weight + weight
      covar *= pw / (pw**2.0 - self.prior_weight**2.0 - numpy.square(w).sum())
    else:
      covar += numpy.dot(data.T, data)
      covar /= self.prior_weight + weight
    
    prec = numpy.linalg.inv(covar)
    
    # Encode what we have in the required format and return it...
    params = numpy.zeros(3, dtype=numpy.float32)
    params[0] = self.prior_weight + weight
    
    return params.tostring() + mean.tostring() + prec.tostring()
  
  def updateStats(self, stats, es, index, weights = None):
    # Extract the previous state...
    params = numpy.fromstring(stats[:12], dtype=numpy.float32)
    precStart = 12 + 4*self.feats
    mean = numpy.fromstring(stats[12:precStart], dtype=numpy.float32)
    prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
    covar = numpy.linalg.inv(prec)
    
    # Calculate the weighted mean of the new samples...
    exData = es[0, index, :].copy()
    
    exMean = numpy.empty(self.feats, dtype=numpy.float32)
    if weights==None:
      exMean[:] = exData.mean(axis=0)
      weight = float(exData.shape[0])
    else:
      w = weights[index]
      weight = w.sum()
      exMean[:] = (exData * w.reshape((-1,1))).sum(axis=0)
      exMean /= weight
    
    # Offset the data matrix by said mean...
    exData -= exMean.reshape((1,-1))
    
    # Calculate the covariance matrix...
    exCovar = numpy.zeros((self.feats, self.feats), dtype=numpy.float32)
    
    if weights!=None: exData[:,:] *= w.reshape((-1,1))
    exCovar += numpy.dot(exData.T, exData)
    exCovar /= weight
    
    # Update the previous model with the new samples...
    newWeight = params[0] + weight
    newMean = (params[0]*mean + weight*exMean) / newWeight
    meanDiff = exMean - mean
    newCovar = covar + exCovar + (params[0]*weight/newWeight) * numpy.outer(meanDiff)
    
    newPrec = numpy.linalg.inv(newCovar)
    
    # Update the log of the normalising constant...
    params[2] = numpy.log(params[1]) + 0.5*numpy.linalg.slogdet(prec)[1] - 0.5*self.feats*numpy.log(2.0*numpy.pi)
    
    # Encode what we have in the required format and return it...
    return params.tostring() + newMean.tostring() + newPrec.tostring()

  def entropy(self, stats):
    # Extract precision...
    precStart = 12 + 4*self.feats
    prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
    
    # Calculate and return the distributions entropy...
    return 0.5 * (numpy.log(2.0*numpy.pi*numpy.e) * self.feats - numpy.linalg.slogdet(prec)[1])


  def postTreeGrow(self, root, gen):
    """Called once a tree has been grown. Fills in the two stats parameters that stats() leaves at zero: for each node it samples the node's Gaussian, sends the samples back down the tree to estimate what fraction of the Gaussian's mass actually reaches the node, and stores the resulting node weight (params[1]) and log normalising constant (params[2]). gen is the test generator, used to evaluate node tests via gen.do."""
    # Count the total weight in the system, to weight the nodes by the percentage of training samples they see...
    def sumWeight(node):
      # First float of stats is prior_weight + training weight, so subtract the prior to recover the actual sample weight.
      w = numpy.fromstring(node.stats[:4], dtype=numpy.float32)[0] - self.prior_weight
      if node.test!=None:
        w += sumWeight(node.true)
        w += sumWeight(node.false)
      return w
    
    totalWeight = sumWeight(root)
    
    
    # Define a recursive function to analyse each node...
    def weightNode(node, parents):
      # Decode the samples stats (3 float32 params, mean vector, precision matrix)...
      params = numpy.fromstring(node.stats[:12], dtype=numpy.float32)
      precStart = 12 + 4*self.feats
      mean = numpy.fromstring(node.stats[12:precStart], dtype=numpy.float32)
      prec = numpy.fromstring(node.stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
      
      covar = numpy.linalg.inv(prec)
    
      # Send samples down the chain and see how many arrive at the node, to measure how much it has been truncated by the decision boundaries...
      ## Draw the set of samples to send, and stick them into an exemplar set...
      samples = numpy.random.multivariate_normal(mean, covar, (self.samples,))
      samples = numpy.asarray(samples, dtype=numpy.float32)
      
      es = MatrixFS(samples)
      index = numpy.arange(self.samples)
      
      ## Go through the parents, culling samples at each step - only those that take the same branch as this node survive...
      for par,path in parents:
        res = gen.do(par.test, es, index)
        index = index[res==path]
        if index.shape[0]==0: break

      ## Count the survivors and factor in the weighting to get the tree-shape part of the normalising constant...
      # Dividing by the surviving fraction (samples/survivors) renormalises the truncated Gaussian; max(...,1) guards against zero survivors.
      tsWeight = (params[0] - self.prior_weight) / totalWeight
      tsWeight *= float(self.samples) / max(index.shape[0], 1.0)
      
      # Calculate the normalising constant (in log space, including the Gaussian's own constant)...
      logNorm = numpy.log(tsWeight) + 0.5*numpy.linalg.slogdet(prec)[1] - 0.5*self.feats*numpy.log(2.0*numpy.pi)
      
      # Re-encode the node's stats with the updates...
      params[1] = tsWeight
      params[2] = logNorm
      
      node.stats = params.tostring() + node.stats[12:]
      
      # If it has children recurse to them...
      if node.test!=None:
        weightNode(node.true, parents + [(node,True)])
        weightNode(node.false, parents + [(node,False)])

    # Do each node recursively, starting from the root...
    weightNode(root, [])
  

  def answer_types(self):
    """Returns a dictionary mapping each supported answer type to a human readable description - only 'best' is provided."""
    description = 'Point estimate of the probability of the input point'
    return {'best':description}
  
  def answer(self, stats_list, which, es, index, trees):
    # Process each stat in turn, and calculate the average of the samples probability from each...
    p = 0.0
    
    for stats in stats_list:
      # Extract the details from the stat object...
      params = numpy.fromstring(stats[:12], dtype=numpy.float32)
      precStart = 12 + 4*self.feats
      mean = numpy.fromstring(stats[12:precStart], dtype=numpy.float32)
      prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
      
      # Calculate the probability and add it in...
      delta = es[0,index,:] - mean
      p += numpy.exp(params[2] - 0.5 * numpy.dot(delta, numpy.dot(prec, delta)))
    
    if isinstance(which, str):
      return p / len(stats_list)
    else:
      return tuple([p / len(stats_list)]*len(which))
  
  def answer_batch(self, stats_lists, which, es, indices, trees):
    """Batch version of answer - returns a list containing the density estimate for each (stats list, exemplar index) pair. Uses a weave-accelerated C implementation when scipy.weave is available, otherwise falls back to calling answer repeatedly. The Python 2-only tuple-parameter lambda in the fallback has been replaced with a comprehension (identical behaviour, but also valid Python 3 syntax)."""
    if weave is not None:
      esAccess = es.codeC(0, 'es')
      
      code = start_cpp() + """
      // Prepare the access to the es...
       %(itype)s es = (%(itype)s)PyList_GetItem(esData, 0);
      
      // Iterate and process each stat list in turn...
       int item_count = PyList_Size(stats_lists);
       PyObject * ret = PyList_New(item_count);
       
       for (int i=0; i<item_count; i++)
       {
        // Get the list of stats objects...
         PyObject * stats = PyList_GetItem(stats_lists, i);
         int statCount = PyList_Size(stats);
         
        // Iterate the list and handle each element in turn...
         float p = 0.0;
         
         for (int j=0; j<statCount; j++)
         {
          // Extract the information regarding the specific stat object...
           float * params = (float*)(void*)PyString_AsString(PyList_GetItem(stats, j));
           float * mean = params + 3;
           float * prec = mean + feats;
           
          // Put the delta into the temporary storage...
           for (int k=0; k<feats; k++)
           {
            TEMP2(0, k) = es_get(es, indices[i], k) - mean[k];
            TEMP2(1, k) = 0.0; // Preparation for the next bit.
           }
           
          // Calculate the multiplication with the precision...
           for (int k=0; k<feats; k++)
           {
            for (int l=0; l<feats; l++)
            {
             TEMP2(1, k) += prec[feats*k+l] * TEMP2(0, l);
            }
           }
           
           float d = 0.0;
           for (int k=0; k<feats; k++)
           {
            d += TEMP2(0, k) * TEMP2(1, k);
           }

          // Do the final parts required...
           p += exp(params[2] - 0.5 * d);
         }
         
         p /= statCount;
        
        // Store the calculated probability...
         PyObject * ans = PyFloat_FromDouble(p);
         PyList_SetItem(ret, i, ans);
       }
      
      // Return...
       return_val = ret;
       Py_XDECREF(ret);
      """%{'itype':esAccess['itype']}
      
      feats = self.feats
      esData = [esAccess['input']]
      temp = self.temp # (2, feats) scratch array, accessed as TEMP2 in the C code.
      ret = weave.inline(code, ['stats_lists', 'indices', 'feats', 'esData', 'temp'], support_code = esAccess['get'])
      
      if isinstance(which, str): return ret
      else:
        # Replicate the single answer once per requested answer type...
        return [tuple([p] * len(which)) for p in ret]
    else:
      # No weave - fall back on doing each exemplar in turn via the single-sample method...
      return [self.answer(stats_list, which, es, indices[i], trees) for i, stats_list in enumerate(stats_lists)]


  def summary(self, es, index, weights = None):
    # The summary simply contains a lot of feature vectors, tightly packed, with weights - it will consume a lot of space...
    data = numpy.asarray(es[0,index,:], dtype=numpy.float32)
    if weights==None: weights = numpy.ones(data.shape[0], dtype=numpy.float32)
    else: weights = weights[index]
    
    data = numpy.append(weights.reshape((-1,1)), data, axis=1)
    
    return data.tostring()
  
  def updateSummary(self, summary, es, index, weights = None):
    data = numpy.asarray(es[0,index,:], dtype=numpy.float32)
    if weights==None: weights = numpy.ones(data.shape[0], dtype=numpy.float32)
    else: weights = weights[index]
    
    data = numpy.append(weights.reshape((-1,1)), data, axis=1)
    
    return summary + data.tostring()
  
  def error(self, stats, summary):
    # Error is defined as the negative logarithm of the probability of the data provided...
    
    # Extract the details from the stats object...
    params = numpy.fromstring(stats[:12], dtype=numpy.float32)
    precStart = 12 + 4*self.feats
    mean = numpy.fromstring(stats[12:precStart], dtype=numpy.float32)
    prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
    
    # Factor in each feature vector from the summary, by summing in its negative log liklihood...
    summary = numpy.fromstring(summary, dtype=numpy.float32).reshape((-1, self.feats + 1))
    
    delta = summary[:,1:] - mean.reshape((1,-1))
    vmv = (delta * numpy.dot(prec, delta.T).T).sum(axis=1)
    err = 0.5 * (summary[:,0] * vmv).sum()
    err -= summary[:,0].sum() * params[2]
    
    return (err, None)
