# All content Copyright 2010 Cyrus Omar <cyrus.omar@gmail.com> unless otherwise
# specified.
#
# Contributors:
#     Cyrus Omar <cyrus.omar@gmail.com>
#
# This file is part of, and licensed under the terms of, the atomic-hedgehog
# package.
#
# The atomic-hedgehog package is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The atomic-hedgehog package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with the atomic-hedgehog package. If not, see <http://www.gnu.org/licenses/>.
"""OpenCL-accelerated spiking neural network simulations."""

import re

import numpy
import ahh.util as util
import ahh.util.opencl as cl
import ahh.util.codegen as cg

# Module version stamp: major.minor plus a release tag ("beta").
version = util.Version("ahh.sim", [ ("Major", 0), ("Minor", 9) ], [ "beta" ])

#
# Notes:
#
# - Good familiarity with Python is assumed. Some advanced Python features are
#   used occasionally. In particular, decorators, the @property decorator,
#   operator overloading, multiple inheritance and even the odd metaclass are
#   used (hopefully tastefully). You'll have to read about these if you
#   want to completely understand what's going on.
#
# - I add *args and **kwargs to all __init__ so super works correctly. See
#   http://fuhm.net/super-harmful/.
#
# - We're assuming you know how spiking neural networks could be implemented in
#   *some* language already. Try Brette et al, 2007 or one of the many
#   computational neuroscience textbooks for an introduction if not.
#
# - The class docstring describes the mutable attributes. Methods are documented
#   in their own docstrings. Readonly attributes are documented in the __init__
#   docstring. Attributes in or passed directly to the base constructor are not
#   redocumented in the derived class.
#

class Node(util.BidirectionalTree, util.Naming):
    """Base node class for a simulation tree.

    sim
        The Simulation this node is bound to (root of the simulation tree.)
    """
    # Declarative nodes need to be initialized along with the instance. This
    # convenient metaclass enables that (by augmenting __init__).
    # (Python 2 metaclass syntax; see util.HasInitializedProperties.)
    __metaclass__ = util.HasInitializedProperties

    def __init__(self, parent, basename="Node"):
        # Cooperative bases are initialized explicitly, not via super():
        # BidirectionalTree links this node under `parent`; Naming derives a
        # unique name from `basename`.
        util.BidirectionalTree.__init__(self, parent)
        util.Naming.__init__(self, basename)

    @property
    def sim(self):
        # Walks up the parent chain; Simulation overrides this property to
        # return itself, which terminates the recursion.
        return self.parent.sim         # base case in Simulation
    if 0: isinstance(sim, Simulation)  # these are just IDE type hints

def node(cls):
    """Decorator supporting declaratively adding a child node.

    Example
    =======
    class MyUnit(SimulationUnit):
        def __init__(self, ...):
            ...

        @node(StandaloneCode)
        def x_const(code="const int x = 1;", hook="_pre_step_kernel_body"):
            '''Creates a const named x at the top of the kernel.'''

    is equivalent to

    class MyUnit(SimulationUnit):
        def __init__(self, ...):
            ...

            StandaloneCode(self, basename="x_const",
                                 code="const int x = 1;",
                                 hook="_pre_step_kernel_body")

    but is a bit more compact and allows documentation generators to see the
    parameter, its defaults and a docstring.

    Accessing the node during __init__
    ==================================
    The node is actually initialized on first access during __init__, or at the
    end if not accessed. If you use del during __init__, it's not initialized.

    Duck Subclassing
    ================
    The class must follow the util.HasInitializedProperty spec. Node does this,
    but be aware if you are duck subclassing Node. Simplest solution is to
    set the __metaclass__ to util.HasInitializedProperties.

    See also: param, local and state are convenience wrappers around node
              for Param, Local and State respectively.
    """
    def nodemaker(defn):
        # Python 2 function attributes (func_name/func_doc).
        basename = defn.func_name
        doc = defn.func_doc
        kwargs = util.func_kwargs(defn)
        kwargs['basename'] = basename

        ## Create initialized property
        storage_name = '_' + basename
        # The initialization flag must be tracked PER INSTANCE (each instance
        # initializes its own child node). The previous implementation kept a
        # plain closure variable, which (a) was shared by every instance of
        # the class, and (b) could never be updated from the nested functions
        # since Python 2 has no `nonlocal` (assigning it inside finit made it
        # a local, raising UnboundLocalError). Store it on the instance.
        flag_name = storage_name + '_node_initialized'

        def fget(self):
            finit(self)
            return getattr(self, storage_name)

        def fset(self, value):
            finit(self)
            return setattr(self, storage_name, value)

        def fdel(self):
            # Mark as initialized WITHOUT constructing the node: deleting
            # during __init__ means the node should not be created at all.
            setattr(self, flag_name, True)
            # delattr takes exactly (obj, name); the old code passed a third,
            # undefined `value` argument.
            delattr(self, storage_name)

        def finit(self):
            # Idempotent: constructs the child node exactly once per instance.
            if not getattr(self, flag_name, False):
                # Set the flag first so a re-entrant access from within
                # cls.__init__ does not recurse forever.
                setattr(self, flag_name, True)
                cls(self, **kwargs)

        return util.initialized_property(fget, fset, fdel, finit, doc)

    return nodemaker

class Simulation(Node):
    """Manages a simulation on one device. This is the root Node of the sim.

    ctx
        Each Simulation is permanently bound to a single device context, a
        util.opencl.SimpleDeviceContext instance. See an example script.

    models [readonly]
        A tuple containing the models added.

        Models are a special subset of the nodes. They must:

        - Specify a count, which means they are responsible for that many
          neurons per realization of the network. Models are assigned indices
          sequentially in the order they are added.
        - They must have their own code generation function defined, which the
          Simulation hands control off to for the appropriate index range.

        See the Model abstract base class, and SpikingModel specifically.

    n_neurons_per_realization [calculated]
        The total number of neurons per realization.
        (i.e. the sum of model counts.)

    = Realizations
    You can run multiple realizations of the same network spec in parallel.

    All realizations share the same memory structure, generated code and
    constants. However, they each have their own independent memory contents
    and thus can diverge from one another. You can use the realization_num
    variable in generated code to control this.

    NOTE: Realizations cannot communicate with one another. Communication
          should only happen between units in a Model.

    n_realizations
        The number of realizations of the simulation to run.

    n_neurons_per_sim [calculated]
        The number of neurons the entire simulation is managing.
        (i.e. n_neurons_per_realization * n_realizations)

    = Divisions
    If the memory needed by the realizations exceeds the amount of storage
    available on your device, the realizations will be split into divisions.
    The realizations comprising a division are loaded onto the device and run
    to completion before the next division is loaded and run.

    n_realizations_per_division_max
        WARNING: This will be automatically calculated at some point in the
                 future.

    n_divisions [calculated]
        The number of divisions. See source for calculation.

    n_neurons_per_division_max [calculated]
        The number of neurons per division.
        (i.e. n_neurons_per_realization * n_realizations_per_division_max)

    Simulation steps and corresponding methods:
    1. Finalization of the spec (finalize)
    2. Memory Allocation (allocate)
    3. Code Generation (generate)
    4. Code Compilation (compile)
    5. Runtime (run)
    6. Memory Deallocation (free)
    """
    def __init__(self, ctx):
        # Root of the tree: no parent, no basename.
        Node.__init__(self, None, None)

        self._ctx = ctx

        ## Convenient things to accumulate as models are added:
        self._models = []
        self._model_names = []
        self._model_lookup = { }
        self._model_counts = []
        self._model_offsets = []

        ## Realization-related attributes
        self._n_neurons_per_realization = 0
        self.n_realizations = 1
        self.n_realizations_per_division_max = 1

        ## Timestep-related parameters
        Param(self, 'DT', 0.1)
        Param(self, 't', 'timestep*DT')

        ## Thread index parameters
        cl_type = cl.cl_int
        hook = 'thread_idx_calculations'

        default = cg.ConstDeclaration('gid_expr')
        Param(self, 'gid_expr', 'get_global_id(0)')
        Param(self, 'gid', default, cl_type, hook)

        default = cg.ConstDeclaration('gsize_expr')
        Param(self, 'gsize_expr', 'get_global_size(0)')
        Param(self, 'gsize', default, cl_type, hook)

    @property
    def sim(self):
        return self # base case for Node.sim
    if 0: isinstance(sim, Simulation)

    @property
    def models(self):
        # Returned as a tuple so callers cannot mutate the internal list.
        return tuple(self._models)
    if 0: isinstance(models, tuple)

    ## Neuron counts relative to various things
    @property
    def n_neurons_per_realization(self):
        return self._n_neurons_per_realization
    if 0: isinstance(n_neurons_per_realization, int)

    @property
    def count(self):
        """Convenient generic name across models and the whole sim."""
        return self.n_neurons_per_realization
    if 0: isinstance(count, int)

    @property
    def n_neurons_per_sim(self):
        return self._n_neurons_per_realization * self.n_realizations
    if 0: isinstance(n_neurons_per_sim, int)

    @property
    def n_neurons_per_division_max(self):
        return self.n_neurons_per_realization * \
               self.n_realizations_per_division_max
    # fixed: hint previously named a nonexistent n_neurons_per_realization_max
    if 0: isinstance(n_neurons_per_division_max, int)

    ## Device stuff
    @property
    def ctx(self):
        return self._ctx
    if 0: isinstance(ctx, cl.SimpleDeviceContext)

    @property
    def n_divisions(self):
        # Ceiling division: the last division may hold fewer realizations.
        return util.int_div_round_up(self.n_realizations,
                                     self.n_realizations_per_division_max)
    if 0: isinstance(n_divisions, int)

    ## (internal interface with Model)
    def _add_model(self, model):
        # Called by Model.__init__. Do not call directly.
        # Returns the model's starting neuron index within a realization.
        self._models.append(model)
        self._model_names.append(model.name)
        self._model_lookup[model.name] = model
        self._model_counts.append(model.count)
        offset = self._n_neurons_per_realization
        self._model_offsets.append(offset)
        self._n_neurons_per_realization += model.count
        return offset

    ## Specification finalization
    def finalize(self):
        """Triggers finalization steps.

        All parameters for simulation units are modifiable, without side
        effects, unless explicitly specified by the unit, until the
        finalize method is called.

        This allows units to perform any finalization steps and sanity checks
        needed before memory allocation, code generation and runtime proceeds.
        """
        if not self.finalized:
            self._call_staged_hook('finalize')
            self._finalized = True

    def _post_finalize(self):
        """Assertions that should hold for all specifications."""
        assert self.n_realizations > 0
        assert self.n_realizations_per_division_max > 0
        assert self.n_realizations_per_division_max <= self.n_realizations

    @property
    def finalized(self):
        return getattr(self, '_finalized', False)

    ## Memory allocation
    def allocate(self):
        """Allocates (but does not initialize) memory for the simulation.

        This step takes your specification and allocates memory on the device
        for use by each division. This does NOT initialize the memory (except
        for some constants), it is simply allocated.
        """
        if not self.finalized:
            self.finalize()

        if not self.allocated:
            self._call_staged_hook("allocate")
            self._allocated = True

    @property
    def allocated(self):
        return getattr(self, '_allocated', False)

    ## Memory deallocation
    def free(self):
        """Frees all device memory associated with the simulation.

        If you don't do this, automatic garbage collection will free it at
        some point, but often not until you exit your Python shell.
        """
        if self.allocated:
            self._call_staged_hook("free")
            self.ctx.free_all_and_clear()

    ## Code generation
    def generate(self):
        """This step generates the OpenCL code that will run on the device.

        Places it into the cl_code attribute and returns it. You can inspect
        or even modify it (if you dare) after generating it via the cl_code
        attribute.

        See source of this method for the skeleton of a generic simulation,
        then hop into e.g. the SpikingModel class for the skeleton of a
        spiking model.
        """
        # Not worrying about 80-character lines in this section, it gets far
        # too messy. Expand your editor past 80 lines, it'll be ok.

        # Simple class which maintains a list of strings to concatenate at the
        # end to produce code and provides some useful helper functions so
        # indentation isn't messed up and such.
        g = cg.CodeGenerator()

        # Replaces identifiers with expressions from nodes along the current
        # path to the root node.
        g.processor = util.IdentifierProcessor()
        g.processor.substitutor = util.ExpressionSubstitutor(g.processor)

        cg.call_staged_generation_hook(self, 'sim_generate', g)

        self.cl_code = g.code
        # NOTE(review): module-level util.call_staged_hook here vs. the
        # self._call_staged_hook method used elsewhere -- presumably
        # equivalent; confirm against ahh.util.
        util.call_staged_hook(self, "sim_cl_post_processing")
        return self.cl_code

    def _sim_generate(self, g):
        """Skeleton of the generated program: auxiliary kernels + step kernel."""
        ## Auxiliary kernels
        cg.call_staged_generation_hook(self, "auxiliary_kernels", g)
        g.newline()

        ## Step kernel
        ## = Specify step arguments
        cg.call_staged_generation_hook(self, "step_kernel", g)

    def _step_kernel(self, g):
        # Emit the step kernel signature; units contribute extra arguments
        # through the '_specify_step_arguments' hook.
        g.enter_line()
        g.snip("__kernel void step(")
        g.align_with_last_snip()
        step_arguments = util.ListSeparator(g, ",\n")
        step_arguments += ("const int timestep", "const int realization_start")
        cg.call_generation_hook('_specify_step_arguments', g, False,
                                ('step_arguments', step_arguments))
        g.stop_aligning()
        g.complete_line(") {")
        g.indent_depth += 1
        cg.call_staged_generation_hook(self, "step_kernel_body", g)
        g.indent_depth -= 1
        g.line("}")

    def _step_kernel_body(self, g):
        ## = Generate step kernel body
        cg.call_staged_generation_hook(self, "thread_calculations", g)

        g.line("""// Loops over all neurons in this division (irrespective of which realization they are in)
                  const int first_idx_sim = realization_start * n_neurons_per_realization;  // Index, relative to the simulation, of the first neuron being processed by the current division.
                  const int last_idx_sim = min(first_idx_sim + n_neurons_per_division, n_neurons_per_sim); // and the last one
                  for (int idx_sim = first_idx_sim + gid; idx_sim < last_idx_sim; idx_sim += gsize) {""")
        g.indent_depth += 1
        cg.call_staged_generation_hook(self, "step_kernel_loop_body", g)
        g.indent_depth -= 1
        g.line("}")

    def _step_kernel_loop_body(self, g):
        g.line("""// Neuron index calculations
                  const int realization_num = idx_sim / n_neurons_per_realization;  // index of the realization this neuron is a part of (remember how integer division works)
                  const int realization_start_idx_sim = realization_num * n_neurons_per_realization;  // index of the first neuron in this realization, relative to the simulation
                  const int realization_start_idx_div = (realization_num - realization_start) * n_neurons_per_realization;  // index of the first neuron in this realization, relative to the division
                  const int idx_realization = idx_sim - realization_start_idx_sim;  // index of this neuron relative to the realization
                  const int idx_division = idx_sim - first_idx_sim; // index of this neuron relative to the division""")
        g.newline()

        g.line("// Invoke appropriate model")
        p = util.Partitioner(g, var_name="idx_realization", min_start=0,
                             max_end=self.n_neurons_per_realization)
        for model in self._models:
            # Bind `model` as a default argument: a plain closure would be
            # late-binding, so if Partitioner defers the callbacks every one
            # would see the last model.
            p.next(model.offset, model.offset + model.count,
                   lambda g, model=model: model._step_kernel(g))

    def compile(self):
        """Compiles the OpenCL code in self.cl_code.

        The resulting program is saved in self.program. The step kernel is
        saved in self._step.
        """
        cl_code = self.cl_code
        ctx = self.ctx
        self.program = ctx.compile(cl_code)
        self._step = self.program.step

    ## Run-time
    def run(self, n_timesteps):
        """
        The simulation runs as discrete timesteps, with device-wide
        synchronization between each timestep across all realizations in the
        current division.

        Full timecourse:

        1. When the run function is called, all simulation units are informed
        by a call to the _prepare_run(run_info) hook. run_info is a RunInfo
        instance.

        2. When a division is loaded, the first step is to ask each simulation
        unit to initialize memory via the _initialize_memory(division_info)
        hook. division_info is a DivisionInfo instance.

        3. Each simulation unit is then sent a _timestep_complete(division_info)
        hook with division_info.t == 0. This is to allow processing of initial
        conditions. Make sure you special case this if your processing requires
        the step function to have run at least once or you want to ignore
        initial conditions.

        4. The time loop is now run. After each step, the
        _timestep_complete(division_info) hook is called. Note that because
        t = 0 is initial conditions, you actually get n_timesteps - 1 total
        state updates. We found this to be the more parsimonious way of doing
        things, but keep it in mind.

        5. After the time loop is complete for a division, the
        _division_complete(division_info) hook is called. The next division
        is loaded onto the device, if necessary - go to step 2.

        6. After all divisions have complete, _run_complete(run_info) is called.
        """
        ## Create run_info
        # Nested classes must be referenced through self (bare RunInfo /
        # DivisionInfo would be NameErrors at module scope).
        run_info = self.RunInfo(n_timesteps)

        # NOTE(review): self._units is not defined in this file's visible
        # portion -- presumably provided by util.BidirectionalTree; confirm.
        ## >>> Node._prepare_run(run_info) <<<
        for sim_unit in self._units:
            sim_unit._prepare_run(run_info)

        for division in xrange(self.n_divisions):
            ## Create division_info
            t = numpy.int32(0)

            realization_start = division * self.n_realizations_per_division_max

            # The last division may be partial.
            n_realizations = self.n_realizations_per_division_max
            if (realization_start + n_realizations) > self.n_realizations:
                n_realizations = self.n_realizations - realization_start

            division_info = self.DivisionInfo(run_info, t, realization_start,
                                              n_realizations)

            ## >>> Node._initialize_memory(division_info) <<<
            for sim_unit in self._units:
                sim_unit._initialize_memory(division_info)

            ## >>> Node._timestep_complete(division_info) <<<
            for sim_unit in self._units:
                sim_unit._timestep_complete(division_info) # for initial conds.

            for t in xrange(t + 1, t + n_timesteps):
                division_info.t = t

                # TODO: call step

                ## >>> Node._timestep_complete(division_info) <<<
                for sim_unit in self._units:
                    sim_unit._timestep_complete(division_info)

            ## >>> Node._division_complete(division_info) <<<
            for sim_unit in self._units:
                sim_unit._division_complete(division_info)

        ## >>> Node._run_complete(run_info) <<<
        for sim_unit in self._units:
            sim_unit._run_complete(run_info)

    class RunInfo(object):
        """Run-wide info handed to _prepare_run/_run_complete hooks."""
        def __init__(self, n_timesteps):
            self.n_timesteps = n_timesteps

    class DivisionInfo(object):
        """Per-division info handed to the per-division hooks."""
        def __init__(self, run_info, t, realization_start, n_realizations):
            # run() passes the RunInfo instance first (the old parameter was
            # misnamed n_timesteps and stored the RunInfo object under it).
            # Keep an n_timesteps attribute for convenience.
            self.run_info = run_info
            self.n_timesteps = run_info.n_timesteps
            self.t = t
            self.realization_start = realization_start
            self.n_realizations = n_realizations


def param(defn):
    """Shorthand decorator: identical to applying @node(Param)."""
    make = node(Param)
    return make(defn)

class Param(Node):
    """Creates and manages a parameter which can be set with an Expression.

    cl_type
        The OpenCL data type of this parameter.

    default_hook
        If the value requires auxiliary statements to be inserted, where
        should they go by default?

    See Also: @param
    """
    _storage_prefix = '_ahh_sim_Param_value_'
    _manager_prefix = '_ahh_sim_Param_'

    def __init__(self, parent, basename,
                 default=None,
                 cl_type=cl.cl_float,
                 default_hook="_pre_step_kernel_body",
                 doc=None):
        """
        default
            The default value to use. If callable, is called with no
            arguments first. Then coerced to an expression.

        doc
            The docstring to use for the attribute.
        """
        Node.__init__(self, parent, basename)

        self.cl_type = cl_type
        self.default_hook = default_hook

        self._storage_name = storage_name = self._storage_prefix + basename

        # The property is installed on `parent`, so inside fget/fset/fdel
        # `self` is the parent; `self_` closes over this Param.
        self_ = self

        def fget(self):
            return getattr(parent, storage_name, None)

        def fset(self, value):
            # Detach this Param from the old value's variable context, then
            # attach it to the new (coerced) value.
            cur = getattr(parent, storage_name, None)
            if cur is not None: self_._remove_context(cur)
            value = Expression.coerce(value)
            self_._add_context(value)
            # Store the coerced value (previously stored None, discarding it).
            setattr(parent, storage_name, value)

        def fdel(self):
            # fdel takes no value argument (the old signature was invalid for
            # a property deleter).
            self_._remove_context(getattr(parent, storage_name, None))
            # Delete the actual storage slot (the old code referenced an
            # undefined _PREFIX name).
            delattr(parent, storage_name)

        util.define_property(parent, basename, fget, fset, fdel, doc)

        if hasattr(default, '__call__'):
            # useful with the decorator version since Expression IS stateful
            default = default()
        default = Expression.coerce(default)
        setattr(parent, basename, default)

        # Expose the managing Param itself under a well-known name (the old
        # code referenced an undefined _MANAGER_PREFIX).
        setattr(parent, self._manager_prefix + basename, self)

    ##
    # Expressions set as the values of Params maintain that Param in their
    # variable context so that if you add or set them to another Param, or
    # move them around, the variable bindings don't go away.
    # These two methods manage that functionality.
    def _add_context(self, value):
        if value is not None:
            variable_contexts = getattr(value, 'variable_contexts', None)
            if variable_contexts is not None and self not in variable_contexts:
                variable_contexts.append(self)

    def _remove_context(self, value):
        if value is not None:
            variable_contexts = getattr(value, 'variable_contexts', None)
            if variable_contexts is not None and self in variable_contexts:
                variable_contexts.remove(self)

    ##
    def _finalize(self):
        value = Expression.coerce(getattr(self.parent, self.basename))

        if value is not None:
            variable_contexts = getattr(value, 'variable_contexts', None)
            if variable_contexts and len(variable_contexts) > 0 and \
                variable_contexts[0] == self:
                # The first Param is responsible for specifying the details.
                value.details = cl.GenerationDetails(self.parent,
                    self.name, self.default_hook, self.cl_type)

def local(defn):
    """Shorthand decorator: identical to applying @node(Local)."""
    make = node(Local)
    return make(defn)

class Local(Param):
    """Parameter whose default value reserves a kernel-local name.

    See also: @local.
    """
    def __init__(self, parent, basename,
                 cl_type=cl.cl_float,
                 default_hook="_pre_state_calculations",
                 doc=None):
        # Build the underlying Param with no default value...
        Param.__init__(self, parent, basename,
                       default=None, cl_type=cl_type,
                       default_hook=default_hook, doc=doc)
        # ...then point the parent's attribute at this node's generated
        # (unique) name, reserving it for use inside the kernel.
        setattr(parent, basename, self.name)

class ModelNode(Node):
    """Node whose extra hooks are invoked only by its enclosing model.

    model [readonly]
        The model this unit is bound to.
    """
    def __init__(self, parent, basename="ModelNode"):
        Node.__init__(self, parent, basename)

    @property
    def model(self):
        # Delegate up the tree; Model overrides this property to return
        # itself, terminating the walk.
        return self.parent.model
    if 0: isinstance(model, Model)

class Model(ModelNode):
    """Specifies the behavior of a contiguous range of neurons.

    count [readonly, must be >= 1]
        The number of neurons it is responsible for

    offset [calculated]
        The index (relative to the realization) of the first neuron in the
        simulation bound to this model.

        For example, if you add two models, each controlling 1000 neurons,
        their offsets will be 0 and 1000, respectively.
    """
    def __init__(self, parent, count, basename="Model"):
        assert count > 0
        ModelNode.__init__(self, parent, basename)
        self._count = count
        # Register with the root Simulation via the sim property (the old
        # code referenced an undefined bare name `sim`); _add_model returns
        # this model's starting index within a realization.
        self._offset = self.sim._add_model(self)

    @property
    def model(self):
        return self # base case for ModelNode.model
    if 0: isinstance(model, Model)

    @property
    def count(self):
        return self._count
    if 0: isinstance(count, int)

    @property
    def offset(self):
        return self._offset
    if 0: isinstance(offset, int)

    ## Model Code Generation
    def _step_kernel(self, sim_g):
        """Generate model-specific code.

        - Return it and place it in the cl_code attribute.
        - The indentation level to start at is passed in by Simulation.
        - See SpikingModel for a concrete example.
        """
        # Use a private generator that mirrors the simulation generator's
        # indentation and identifier processor.
        g = cg.CodeGenerator()
        g.indent_depth = sim_g.indent_depth
        g.processor = sim_g.processor

        g.line("""// Model name
        const int idx_model = idx_realization - offset;  // Index of neuron relative to the models offset.""")
        cg.call_staged_generation_hook(self, "model_cl_code", g)

        self.cl_code = g.code
        self._call_staged_hook('_model_cl_post_processing')
        # Splice the model's code into the simulation's generator.
        sim_g.snip(self.cl_code, False)

## Standalone buffers (buffers are device-side arrays in OpenCL parlance)
class Allocation(Node, Expression):
    """Allocates a buffer but does not initialize it in any way.

    cl_type, length, tag
        Passed into ctx.alloc. Modifiable until finalization.

    kernel_argument [calculated]
        The signature of the buffer to pass to some kernel. Uses qualifier.
    """
    def __init__(self, parent, basename, cl_type=None, length=None,
                 qualifier="__global const cl_type * const ",
                 tag="Allocations"):
        Node.__init__(self, parent, basename)
        self.cl_type = cl_type
        self.length = length
        # fixed: was `self.qualifiers = qualifiers`, a NameError (the
        # parameter is named `qualifier`).
        self.qualifier = qualifier
        self.tag = tag
        # Keep the parent in this Expression's variable context so bindings
        # survive if the allocation is used as a Param value elsewhere.
        self.variable_contexts.append(parent)

    def _allocate(self):
        # Device-side allocation happens through the simulation's context.
        self.allocation = self.sim.ctx.alloc(self.cl_type, self.length,
                                             self.tag)

    @property
    def kernel_argument(self):
        # fixed: the old code prepended an undefined name `prefix`. The
        # qualifier string already ends with a space, so simply concatenate.
        return "%s%s" % (self.qualifier, self.name)

    @property
    def expression(self):
        return self.name

class StepAllocation(Allocation):
    """An Allocation whose buffer is also handed to the step kernel."""
    def __init__(self, parent, basename, cl_type=None, length=None,
                 qualifier="__global const cl_type * const ",
                 tag="Allocations"):
        # All bookkeeping is deferred to Allocation; this subclass only adds
        # the step-kernel argument hook below.
        Allocation.__init__(self, parent, basename, cl_type=cl_type,
                            length=length, qualifier=qualifier, tag=tag)

    def _specify_step_arguments(self, step_arguments):
        # Hook invoked while the step kernel signature is being generated;
        # contributes this buffer's formal parameter.
        step_arguments.append(self.kernel_argument)


## Atomics
class Atomics(Node):
    """Emits the int32 atomics extension pragmas, at most once per parent."""
    def __init__(self, parent, basename="Atomics"):
        # Guard: a second Atomics with the same basename on this parent is a
        # silent no-op, so the pragmas are only emitted once.
        if hasattr(parent, basename):
            return
        Node.__init__(self, parent, basename)
        setattr(parent, basename, self)

    def _pre_sim_generate(self, g):
        # One #pragma line per known 32-bit-integer atomics extension.
        for ext in cl.int32_atomics_extensions:
            g.line(ext.get_pragma_str())

## Errors
class Error(Exception):
    """Root of the exception hierarchy for errors raised by ahh.sim."""

## Spiking-model specific stuff
# (eventually we could release the basic sim stuff independently of the
#  small amount of code supporting spiking models specifically...)
class SpikingModel(Model):
    """The hollow shell of a spiking neuron model.

    Has a spike_condition parameter and the appropriate hooks for code
    generation, see source.
    """
    def __init__(self, sim, count, basename="SpikingModel"):
        Model.__init__(self, sim, count, basename)

    @param
    def spike_condition(default=False):
        """Expression to evaluate to determine whether a spike occurred."""

    def _model_cl_code(self, g):
        # NOTE(review): Model._model_cl_code is not defined in the visible
        # portion of this file -- presumably supplied via the hook machinery
        # or a mixin; confirm before relying on this call.
        Model._model_cl_code(self, g)

        g.line("const int idx_state = idx_model + (realization_num - realization_start)*count;  // Index into state variable arrays.")
        g.newline()

        # Skeleton of a spiking model's per-neuron body: each stage is a
        # staged generation hook that units can contribute code to.
        cg.call_staged_generation_hook(self, "read_incoming_spikes", g)
        g.newline()

        cg.call_staged_generation_hook(self, "insert_spikes", g)
        g.newline()

        cg.call_staged_generation_hook(self, "read_state", g)
        g.newline()

        cg.call_staged_generation_hook(self, "calculate_inputs", g)
        g.newline()

        cg.call_staged_generation_hook(self, "state_calculations", g)
        g.newline()

        cg.call_staged_generation_hook(self, "independent_state_updates", g)
        g.newline()

        cg.call_staged_generation_hook(self, "spike_processing", g)
        g.newline()

    def _spike_processing(self, g):
        # Emit an if/else on the (substituted) spike_condition expression,
        # dispatching to the spike / no-spike hook stages.
        g.line("if (spike_condition) {")
        g.indent_depth += 1
        cg.call_staged_generation_hook(self, "spike_generated", g)
        g.indent_depth -= 1
        g.line("} else {")
        g.indent_depth += 1
        cg.call_staged_generation_hook(self, "no_spike_generated", g)
        g.indent_depth -= 1
        g.line("}")

    def _spike_generated(self, g):
        # Spike branch: state updates first, then propagation to targets.
        cg.call_staged_generation_hook(self, "spike_state_updates", g)
        g.newline()
        cg.call_staged_generation_hook(self, "spike_propagation", g)

    def _no_spike_generated(self, g):
        # No-spike branch: only the no-spike state updates stage.
        cg.call_staged_generation_hook(self, "no_spike_state_updates", g)

def state(defn):
    """Shorthand decorator: identical to applying @node(State)."""
    make = node(State)
    return make(defn)

class State(ModelNode, Expression):
    """Creates and manages memory for a state variable in a model unit.

    cl_type
        The OpenCL datatype for this variable.

    calculations, calculations_hook
        If auxiliary calculations are needed, the string in calculations
        is placed in the calculations_hook.

    spike_updater, no_spike_updater
        The Expression to use to update the state if there is/is not a spike.

        If None, no update will be done.

    reader
        The string to use to read the state variable from the buffer.

    = After allocation
    allocation
        The unit containing the allocation.

    will_use_independent_update [calculated]
        Returns whether spike_updater and no_spike_updater are the same.

        If True, the variable name passed to spike_updater will be
        "independent_updater" and the default hook will be
        "_pre_independent_state_updates".

        If False, "[no_]spike_updater" and "pre_[no]spike_state_updates" will
        be used for spike_updater and no_spike_updater respectively.
    """
    def __init__(self, parent, basename,
                 cl_type=cl.cl_float,
                 calculations=None, calculations_hook="_state_calculations",
                 spike_updater=None, no_spike_updater=None,
                 independent_updater=None,
                 reader='const cl_type name = alloc[idx_state];',
                 doc=""):
        """
        independent_updater
            Equivalent to setting both spike_updater and no_spike_updater
            to the same value.

        (The docstring previously documented a `prefix` parameter that does
        not exist in this signature.)
        """
        ModelNode.__init__(self, parent, basename)

        self.cl_type = cl_type
        self.calculations = calculations
        self.calculations_hook = calculations_hook

        # Either one shared updater (independent_updater) or a separate
        # spike/no-spike pair may be given, never both.
        if independent_updater is not None:
            assert spike_updater is None
            assert no_spike_updater is None
            self.spike_updater = self.no_spike_updater = \
                Expression.coerce(independent_updater)
        else:
            assert independent_updater is None
            # NOTE(review): these assignments go through the @param-declared
            # spike_updater/no_spike_updater properties below -- confirm the
            # intended interaction with the declarative node machinery.
            self.spike_updater = Expression.coerce(spike_updater)
            self.no_spike_updater = Expression.coerce(no_spike_updater)

        self.reader = reader

        setattr(parent, basename, self)
        self.variable_contexts.append(parent)  # in case it is used as the value of a Param elsewhere

    @property
    def will_use_independent_update(self):
        """Are the two updaters the same?"""
        return self.spike_updater == self.no_spike_updater
    if 0: isinstance(will_use_independent_update, bool)

    @param
    def spike_updater(default_hook="_pre_spike_state_updates"):
        """The expression to use to update the state if there is a spike."""

    @param
    def no_spike_updater(default_hook="_pre_no_spike_state_updates"):
        """The expression to use to update the state if there is not a spike."""

    def _pre_finalize(self):
        # Collapse identical spike/no-spike updaters into a single
        # independent_updater Param before finalization.
        if self.will_use_independent_update:
            updater = self.spike_updater
            self.spike_updater = None
            self.no_spike_updater = None
            Param(self, "independent_updater", default=updater,
                  cl_type=self.cl_type,
                  default_hook='_pre_independent_state_updates')
        else:
            for updater in (self.spike_updater, self.no_spike_updater):
                if updater is not None:
                    updater.cl_type = self.cl_type

    def _finalize(self):
        # Attach the auxiliary-calculation code, if any, to its hook.
        if self.calculations is not None:
            calculations = cg.StandaloneCode(self, self.calculations_hook,
                                             self.calculations)

        # One slot per neuron per realization in the current division.
        length = self.model.count * self.sim.n_realizations_per_division_max
        # NOTE(review): the 5th positional argument (False) lands on
        # StepAllocation's `qualifier` parameter, which elsewhere is a
        # string -- confirm this is intentional.
        self.alloc = StepAllocation(self, "alloc", self.cl_type,
                                    length, False, "State")

    def _read_state(self, g):
        # Emits the reader template; identifier substitution fills in
        # cl_type/name/alloc.
        g.line(self.reader)

    def _update_code(self, g, update_prefix):
        # Writes "<prefix>_updater" back into the state buffer slot.
        g.line("alloc[idx_state] = %s_updater;" % update_prefix)

    def _independent_state_updates(self, g):
        if self.will_use_independent_update \
           and self.independent_updater is not None:
            self._update_code(g, "independent")

    def _spike_state_updates(self, g):
        if not self.will_use_independent_update \
           and self.spike_updater is not None:
            self._update_code(g, "spike")

    def _no_spike_state_updates(self, g):
        if not self.will_use_independent_update \
           and self.no_spike_updater is not None:
            self._update_code(g, "no_spike")

    @property
    def expression(self):
        # As an Expression, a State stands for its own (unique) name.
        return self.name