#!/usr/bin/python3

# Copyright (c) 2011 Michael Mol <mikemol@gmail.com>.
#
# This work is licensed under a
# Creative Commons Attribution 3.0 Unported License, viewable at
# http://creativecommons.org/licenses/by/3.0/
#
import bisect
import math
import random
import sqlite3
from collections import Counter
from functools import reduce
from operator import add

import filterstack


def probchoice(choices, probabilities):
    """Takes a set of choices and their weights, and performs
    a random selection from choices based on those weights.

    Returns a random element from choices."""

    # Need to scale the probabilities down to the 0..1 range.
    scalefactor = reduce(add, probabilities)

    # Build the cumulative distribution of the normalized weights.
    prob_accumulator = 0
    accumulator = []
    for p in probabilities:
        prob_accumulator += float(p) / scalefactor
        accumulator.append(prob_accumulator)

    # Pick a point in [0, 1) and find which cumulative bucket it lands in.
    r = random.random()
    bsct = bisect.bisect_left(accumulator, r)
    # Floating-point rounding can leave the final cumulative value slightly
    # below 1.0; if r lands above it, bisect returns len(choices) and the
    # index would be out of range. Clamp to the last choice.
    if bsct >= len(choices):
        bsct = len(choices) - 1
    return choices[bsct]

def wordIDFromToken(tok):
    """Takes a token, returns an identifier corresponding to that token.

    This identifier can be passed into tokenFromWordID to get the original
    token back.

    Simple objects like strings and numbers should be safe. Do not presume
    complex objects will be successfully returned."""
    # Massage the token into a tagged string: 's<token>' for real values,
    # bare 'n' for None. Not everything we'll be placing is a string.
    # Do *not* make this 'if tok' -- falsy tokens ('' or 0) are real tokens.
    if tok is not None:
        # tok is a value. Prepend an 's'.
        tok = "s%s" % tok
    else:
        # tok is None. Make it an 'n'.
        tok = 'n'

    global SQLITECONN
    cur = SQLITECONN.cursor()
    try:
        # Make sure a row for the token exists, then look up its rowid.
        cur.execute('insert OR IGNORE into word_ids(token) values (?)', (tok,))
        qry = cur.execute('select ROWID from word_ids where token=?', (tok,))
        val = qry.fetchone()
    finally:
        # Close the cursor even if an execute raises.
        cur.close()

    return val[0]

def tokenFromWordID(wordID):
    """Takes an identifier, returns the token associated with that identifier.
    Will raise KeyError if this identifier is not known to us."""
    global SQLITECONN

    # Find the token associated with the word ID.
    cur = SQLITECONN.cursor()
    try:
        qry = cur.execute('select token from word_ids where ROWID=?', (wordID,))
        val = qry.fetchone()
    finally:
        # Close the cursor even if the execute raises.
        cur.close()

    if val is None:
        # The docstring promises a missing-key exception; previously this
        # fell through to subscripting None and raised TypeError instead.
        raise KeyError(wordID)

    val = val[0]

    # De-massage the data: strip the type tag added by wordIDFromToken.
    ty = val[0]
    if 's' == ty:
        # It's a string.
        val = val[1:]
    elif 'n' == ty:
        # It's a None.
        val = None

    return val

def incAssoc(idx, tok1, tok2):
    """Takes the markov model number, the lead token, and the follow token,
    and increments the association of that lead token to that follow token
    for that markov model.

    The increment is only queued in DEF_INCASSOC here; save() later
    collapses the queue and flushes it to the database."""
    global DEF_INCASSOC

    DEF_INCASSOC.append((idx, tok1, tok2))


def dumpBufferColumn(sentencebuf, idx):
    """Debug aid: print column idx of the sentence buffer on one line, as
    { wordID[token] : weight } entries."""
    print( "Column %d" % idx)
    for symID, weight in sentencebuf[idx].items():
        print( "{ %d[%s] : %d }" % (symID, tokenFromWordID(symID), weight), end=' ')
    print()

def fillBufferColumn(sentencebuf, reftoken, idx):
    """Populate one column of the sentence buffer.

    The reference token is presumed to be some decided-upon symbol preceding
    the symbol sequence being generated. Column idx of sentencebuf is
    replaced with a {follower wordID: count} dict of every value ever
    recorded as following reftoken at that offset."""

    global SQLITECONN
    cur = SQLITECONN.execute('select follower, count from word_associations where offset=? and leader=?', (idx, reftoken))
    sentencebuf[idx] = {follower: count for follower, count in cur}
    cur.close()

def getWordSightingCount(tok):
    """Return the summed offset-0 follower count for wordID tok -- how many
    times it has been recorded following anything at offset 0.

    Returns None when no rows exist (SQL sum() over the empty set)."""
    global SQLITECONN
    query = 'select sum(count) from word_associations where offset=? and follower=?'
    cur = SQLITECONN.execute(query, (0, tok))
    (total,) = cur.fetchone()
    cur.close()
    return total

def getOptionsLimited(idx, tok1):
    """Return the {follower: count} options for leader tok1 at offset idx,
    excluding any follower whose count falls more than two standard
    deviations from the mean count.

    Mean and variance are accumulated in a single pass with Welford's
    online algorithm; the remembered rows are then filtered in a second
    pass."""
    global SQLITECONN
    cur = SQLITECONN.execute('select follower, count from word_associations where offset=? and leader=?', (idx, tok1))

    rows = []
    count = 0
    mean = None
    m2 = None
    # Pass one: remember the rows and accumulate running mean / M2.
    for row in cur:
        rows.append(row)
        weight = row[1]
        count += 1
        if count == 1:
            mean = weight
            m2 = float(0)
        else:
            delta = weight - mean
            mean = mean + delta / count
            m2 = m2 + delta * (weight - mean)
    cur.close()

    # Sample variance (n-1 denominator); zero when fewer than two rows.
    if count > 1:
        variance = m2 / (count - 1)
    else:
        variance = float(0)

    # Conservative filter: keep anything within two standard deviations
    # of the mean.
    distfilter = math.sqrt(variance) * 2

    return {follower: weight for follower, weight in rows
            if abs(mean - weight) <= distfilter}

def clarifyBuffer(sentencebuf, reftoken):
    """Takes a sentence buffer and a reference token.

    The reference token is presumed to be some decided-upon symbol preceding
    the symbol sequence intended to be generated by the sentence buffer.

    Each column in the sentence buffer will have one or more possible weighted 
    choices, and each of these weights will be adjusted as follows:
    * All weights will be initially halved; this function is called
      iteratively, and stale data is to be devalued.
    * Each value known to follow the reftoken by that position in the column
      (see incAssoc and fillBufferColumn to understand these semantics),
      has its weight increased by the number of times it's been seen in that
      position.
    * All options' weights will be reduced proportionately to the number of
      times that option has ever been seen in input. This eliminates the
      tendency of the system to focus on 'filler' words such as "that", or
      even "such", "as" and "and"

    Finally, any options which are not known to follow the reftoken at the
    examined offset are removed, which can leave a column empty.

    NOTE(review): earlier documentation claimed empty columns are refilled
    here via fillBufferColumn, but no such call exists in this function --
    the caller (genSequenceInternal) is what handles the empty case.
    Confirm intent."""

    buflen = len(sentencebuf)
    # First, weaken any prior links in sentencebuf; we've got a new reftoken.
    for idx in range(buflen):
        for option in sentencebuf[idx]:
            sentencebuf[idx][option] /= float(2)

    for idx in range(buflen):
        # All followers of reftoken at this offset, outlier counts filtered.
        selectFrom = getOptionsLimited(idx, reftoken)

        m_idx = idx # which markov offset will match our reference token to
                    # the column
                    # NOTE(review): m_idx is assigned but never used.

        # The amount of weight we apply needs to vary depending on how far
        # away the symbol is.
        # Trying a model that increases the weight as we near the middle, but
        # declines sharply beyond that.
        # x is 0 at idx = 0, and 1 at idx = (buflen-1)
        wghtX = idx / float((buflen - 1))
        #  1 + ( (x+1)^2 - 3x^3)
        wghtModifier = 1 + pow(wghtX+1,2) - ( 3 * pow(wghtX,3) )

        # Furthermore, we're very interested in weighting towards sequence
        # termination (looking for None) as we get farther in, so we'll be
        # applying these values as we go along:
        # wght = wght * mul + add
        wghtNoneAdd = idx
        wghtNoneMul = 2

        newopts = {}
        for option in sentencebuf[idx]:
            # Keep only options known to follow the reference token.
            if option in selectFrom:
                # This option is known to follow the reference token.
                # Reinforce it with our stored sight count.
                sentencebuf[idx][option] += float(selectFrom[option])

                # How many times has this word ever been seen?
                # NOTE(review): may be None/0 if the word was never a
                # follower at offset 0 -- confirm that can't happen here.
                sightings = getWordSightingCount(option)

                # Dampen by overall popularity, then apply distance weight.
                wght = sentencebuf[idx][option]
                wght /= float(sightings)
                wght *= wghtModifier
                if not option:
                    # NOTE(review): options are wordIDs (sqlite ROWIDs,
                    # which start at 1), so this falsy test can only fire
                    # for a wordID of 0 and likely never matches the None
                    # token's real ID. Confirm against wordIDFromToken(None).
                    wght *= wghtNoneMul
                    wght += wghtNoneAdd
                newopts[option] = wght

        sentencebuf[idx] = newopts

def dumpBuffer(buf):
    """Debug aid: dump the whole sentence buffer, one column at a time,
    in offset order."""
    print( "Dumping buffer")
    for offset in range(len(buf)):
        dumpBufferColumn(buf, offset)

def genSequenceInternal(seed = None):
    """Takes a seed symbol as an optional argument. Returns a generator that
    somewhat randomly selects symbols based on data previously processed, using
    that initial seed as a guide. If the seed is not None, the seed will be the
    first yielded symbol."""

    # If the seed is truthy, let it be our first yield.
    # NOTE(review): a falsy-but-real seed ('' or 0) is not yielded even
    # though the docstring promises any non-None seed is -- confirm intent.
    if seed:
        yield seed

    # From here on in, we deal in wordIDs, not the original tokens.
    seed = wordIDFromToken(seed)

    # M1COUNT*[{}] aliases one shared dict across all slots, but every slot
    # is immediately replaced by fillBufferColumn, so that is benign here.
    sentencebuf = M1COUNT*[{}]
    # An initial fill of possibilities, using the None token as the leader.
    for idx in range(M1COUNT):
        fillBufferColumn(sentencebuf, wordIDFromToken(None), idx)

    reftoken = seed
    while True:
        # Re-weight every column against the current reference token.
        clarifyBuffer(sentencebuf, reftoken)

        # Consume the nearest column: a {wordID: weight} items view.
        pos = sentencebuf.pop(0).items()

        # Did we terminate early? (clarifyBuffer can empty a column.)
        if len(pos) == 0:
            break

        # Split into parallel (wordIDs, weights) tuples, pick by weight.
        pos2 = list(zip(*pos))
        chc = probchoice(pos2[0],pos2[1])
        sym = tokenFromWordID(chc)

        if None == sym:
            # We've chosen to end our sequence here.
            break

        # Yield our symbol, and let it guide the next iteration.
        yield sym
        reftoken = chc

        # We dropped a column from the head of our buffer; replace it at the
        # tail, treating our most recent choice as a seed token.
        # NOTE(review): this relies on idx still holding M1COUNT-1 from the
        # fill loop above, which happens to equal the index of the freshly
        # appended column. Fragile -- confirm before refactoring.
        sentencebuf.append({})
        fillBufferColumn(sentencebuf, reftoken, idx)

def genSequence(seed = None):
    """Takes a seed symbol as an optional argument. Returns a generator that
    somewhat randomly selects symbols based on data previously processed, using
    that initial seed as a guide. If the seed is not None, the seed will be the
    first yielded symbol.
    The provided output chain is applied to the provided symbols"""
    # NOTE: the docstring must precede the global statement; previously it
    # followed it, making it a no-op string expression rather than __doc__.
    global OUTCHAIN

    # Pass the seed through; previously this hard-coded None, silently
    # ignoring the caller's seed in contradiction of the docstring.
    output = genSequenceInternal(seed)
    if OUTCHAIN:
        output = filterstack.chainRun(OUTCHAIN, output)

    yield from output

def genSentence(seed = None):
    """Takes a seed symbol as an optional argument. Returns a list of the
    symbols produced by genSequence for that seed -- somewhat randomly
    selected based on data previously processed. If seed is not None it
    will be at the beginning of the returned sequence."""
    return list(genSequence(seed))

def init(inchain = None, outchain = None):
    """Initializes the module's internal accounting, restores serialized data, etc.

    inchain and outchain arguments are filterstack chains for massaging input and
    output data."""
    global M1COUNT

    # The number of 1st-order Markov models we'll be using to generate data.
    M1COUNT = 9

    global DEF_INCASSOC
    # Pending (offset, leader, follower) increments; flushed by save().
    DEF_INCASSOC = []

    global INCHAIN
    INCHAIN = inchain

    global OUTCHAIN
    OUTCHAIN = outchain

    global SQLITECONN
    SQLITECONN = sqlite3.connect('chorttoob.sqlite')
    cur = SQLITECONN.cursor()

    # The table in which we'll store which words are associated with which.
    # 'IF NOT EXISTS' replaces the old try/except sqlite3.OperationalError
    # dance, which could also mask genuine errors (e.g. a locked database).
    cur.execute('''CREATE TABLE IF NOT EXISTS word_associations
    (
        offset      int,
        leader      int,
        follower    int,
        count       int
    ) ;''')
    cur.execute('''CREATE INDEX IF NOT EXISTS olf ON word_associations ( offset, leader, follower);''')

    # Token <-> wordID mapping; rowids serve as the IDs.
    cur.execute('''CREATE TABLE IF NOT EXISTS word_ids 
    (
        token text unique
    ) ;''')
    cur.execute('''CREATE INDEX IF NOT EXISTS wordIdx on word_ids ( token );''')

    cur.close()

    SQLITECONN.isolation_level = 'DEFERRED'

def save():
    """Flush pending association increments (queued by incAssoc) to sqlite.

    Collapses DEF_INCASSOC into per-key quantities, makes sure a row exists
    for each key, then applies all increments in two executemany batches."""
    global SQLITECONN
    global DEF_INCASSOC

    # Collapse the pending increments into {(offset, leader, follower): qty};
    # Counter replaces the old hand-rolled counting loop and the redundant
    # parallel 'updates' list.
    qty_by_key = Counter(DEF_INCASSOC)
    DEF_INCASSOC = []

    update_args = [(qty, key[0], key[1], key[2]) for key, qty in qty_by_key.items()]
    needed_keys = list(qty_by_key)

    # NOTE(review): word_associations has no unique constraint, so 'insert or
    # ignore' never actually ignores; saving a key that already exists in the
    # table can leave duplicate rows that are then both incremented. Confirm
    # whether the schema should carry a unique index on (offset, leader,
    # follower).
    SQLITECONN.executemany('insert or ignore into word_associations values (?,?,?,0)', needed_keys)
    SQLITECONN.executemany('update word_associations SET count = count + ? where offset=? and leader=? and follower=?', update_args)
    SQLITECONN.commit()

def processAppendNones(indata):
    """Generator: pass indata through unchanged, then yield M1COUNT+1
    trailing None objects. For process() internal use (processing requires
    a trail of Nones)."""
    global M1COUNT
    yield from indata
    for _ in range(M1COUNT + 1):
        yield None

def process(indata):
    """Processes a sequence of tokens, represented as an iterable object (such as a list or generator)."""
    global M1COUNT
    global INCHAIN

    toksToProcess = indata
    
    # If we were given an input processing chain, use it.
    if INCHAIN:
        toksToProcess = filterstack.chainRun(INCHAIN, toksToProcess)

    # Ensure we have enough trailing None objects.
    toksToProcess = processAppendNones(toksToProcess)

    noneID = wordIDFromToken(None)

    # Sliding window of the last M1COUNT+1 wordIDs, pre-filled with the
    # None token's ID.
    tokbuffer = [noneID]*(M1COUNT+1)

    for tok in toksToProcess:
        # Shift tokbuffer over by one.
        tokbuffer.pop(0)

        # Substitute a wordID for our token.
        tok = wordIDFromToken(tok)
        # Append our token.
        tokbuffer.append(tok)
        
        # Parse our buffer image into the database. Each iteration of this
        # loop handles a different 1st-order markov chain (one per offset).
        for idx in range(0, M1COUNT):
            # If the first two elements are both None, then we haven't
            # sifted through to the first real data point yet. Skip it.
            if(tokbuffer[0] == noneID and tokbuffer[1] == noneID):
                continue
            leader = tokbuffer[idx]
            follower = tokbuffer[idx + 1]

            if leader == noneID and follower == noneID:
                # Trail-end of the buffer. Don't bother.
                continue
            
            # NOTE(review): the association actually recorded is between the
            # window head (tokbuffer[0]) and the symbol idx+1 ahead of it;
            # the leader/follower values read above only feed the trail-end
            # check and are overwritten here. Confirm that is intended.
            leader = tokbuffer[0]
            follower = tokbuffer[idx+1]
            incAssoc(idx, leader, follower)

