import process_efw
import feedforward
import backprop
from helpers import maxindex
import math
import random
import target_adapt
import pickle
import cantor

# TODO: divide out way of backproping!
# enable variant of target-backprop

# Load the grammar that supplies training words.
# (process_efw is already imported at the top of the file; the duplicate
# import that used to sit here was redundant.)
gram = process_efw.load_grammar()

# Backprop hyper-parameters.
LEARNRATE = 0.1     # gradient step size
MOMRATE = 0.05      # momentum coefficient
BIASFACTOR = 2.0    # exponent factor used in train() to overweight positive errors
MAXLENGTH = process_efw.MAXLENGTH
TRUNKATELENGTH = 4  # words are truncated to this many characters

alphabet = process_efw.alphabet

# Input-side direction codes.
# indirections are not within the range of sigmoid, should however
# be easier to process
INOFFSET = 1.0
INDEFAULT = 0.00
INMID = 0.0
(indirections, dim) = cantor.setup_directions(alphabet, offset = INOFFSET,\
                               default = INDEFAULT, mid = INMID)

# Output-side direction codes, centred on 0.5 (inside the sigmoid's range).
OUTOFFSET = 0.5
OUTDEFAULT = 0.5
OUTMID = 0.5
# NOTE(review): this call overwrites `dim` from the input-side setup above --
# presumably both calls yield the same dimensionality; confirm in cantor.
(outdirections, dim) = cantor.setup_directions(alphabet, offset = OUTOFFSET,\
                               default = OUTDEFAULT, mid = OUTMID)

# From here on MAXLENGTH means the truncated word length, deliberately
# shadowing the grammar maximum loaded above; all sizes below use it.
MAXLENGTH = TRUNKATELENGTH

# Sizes of the coding segments produced by calc_flatcoding().
pospart_size = MAXLENGTH * dim                # direction-coded positional part
relpart_size = len(alphabet) * len(alphabet)  # pairwise precedence part

input_size = len(alphabet) + MAXLENGTH*len(alphabet) + relpart_size
decode_size = len(alphabet) + pospart_size + relpart_size

# The hidden (compression) layer is as wide as the input layer.
COMPRESS_SIZE = input_size


words = gram.get_words()



def load_coder(picklefile = None):
    """Unpickle and return a coder network.

    picklefile -- an open file object to read from; if None, the default
                  "flatcoder.pickle" in the working directory is opened.
    The file object is closed before returning, even if unpickling fails.

    SECURITY NOTE: pickle.load executes arbitrary code from the stream --
    only load files this program wrote itself.
    """
    if picklefile is None:
        # Pickle data is binary: 'rb' is required (the original text-mode
        # 'r' breaks on Python 3 and on newline-translating platforms).
        picklefile = open("flatcoder.pickle", 'rb')

    try:
        coder = pickle.load(picklefile)
    finally:
        # Close even on a truncated/corrupt pickle (the original leaked
        # the handle on error).
        picklefile.close()
    return coder

#hid_size = (COMPRESS_SIZE + decode_size)/2
# Auto-encoder layout: input layer -> compression layer -> decoded output.
layers = [input_size, COMPRESS_SIZE, decode_size]
# Weight initialiser: zero-mean Gaussian noise, sigma 0.5.
higen = lambda: random.gauss(0.0, 0.5)

# The network being trained; uncomment the line below to resume from disk.
flatcoder = feedforward.generate_net(layers, higen)
#flatcoder = load_coder()
# Momentum terms with the same shape as the net, all starting at zero.
mom = feedforward.generate_net(layers, lambda: 0.0)


def calc_flatcoding(word):
    """Build the (input, target-output) codings for one word.

    Both codings share a bag-of-characters segment and a pairwise
    precedence segment; they differ in the positional segment: the input
    uses one-hot character vectors, the target uses cantor direction
    codes padded to a fixed width.
    """
    # Bag of characters: 1.0 for every alphabet symbol present anywhere.
    present = [1.0 if sym in word else 0.0 for sym in alphabet]

    # Only the first MAXLENGTH characters carry positional information.
    visible = word[:MAXLENGTH]

    # Input positions: one one-hot alphabet vector per visible character.
    # NOTE(review): unlike the output segment below, this one is NOT padded
    # to pospart_size (the padding is commented out in the original), so its
    # length varies with the word -- confirm the net tolerates that.
    pos_in = []
    for ch in visible:
        pos_in.extend(1.0 if sym == ch else 0.0 for sym in alphabet)

    # Output positions: direction code per character, padded with
    # OUTDEFAULT up to the fixed pospart_size.
    pos_out = []
    for ch in visible:
        pos_out.extend(outdirections[alphabet.index(ch)])
    pos_out.extend([OUTDEFAULT] * (pospart_size - len(pos_out)))

    # Pairwise precedence: for each ordered pair (first, second), 1.0 iff
    # `second` occurs after the first occurrence of `first` within the
    # first MAXLENGTH characters.
    rel = []
    for first in alphabet:
        if first in word:
            tail = word[word.index(first) + 1:MAXLENGTH]
            rel.extend(1.0 if second in tail else 0.0 for second in alphabet)
        else:
            rel.extend(0.0 for _ in alphabet)

    return (present + pos_in + rel, present + pos_out + rel)



#def ttrain(word, coder = flatcoder, mom = mom):
#    coding = calc_flatcoding(word)
#    (out, activity, net_in, abs_net_in) = feedforward.t_propagate(coding, coder)
#    (net_in_dtargets, synaptic_dtargets, alleviation) = target_adapt.backprop_target([-1.0, 1.0], net, activity, net_in, abs_net_in)



def train(word, coder = flatcoder, mom = mom):
    """One forward/backward training pass on `word`.

    Returns the raw per-output errors (target - actual), computed before
    the exponential bias weighting that is fed to backprop.
    NOTE(review): the momentum net returned by backprop.backprop is only
    rebound locally -- presumably the nets are updated in place; confirm
    in the backprop module.
    """
    incoding, outcoding = calc_flatcoding(word)
    out, preact, postact = feedforward.propagate(incoding, coder,
                                                 feedforward.sigmoid)

    errors = []
    for i in xrange(len(outcoding)):
        errors.append(outcoding[i] - out[i])

    # Asymmetric weighting: scaling by exp(err * BIASFACTOR) amplifies
    # positive errors (under-activation) more than negative ones.
    weighted = [e * math.exp(e * BIASFACTOR) for e in errors]

    mom, deltas = backprop.backprop(coder, weighted, preact, postact,
                                    mom, LEARNRATE, MOMRATE,
                                    backprop.dsigmoid)

    return errors

# wordgen = lambda: gram.generateWord()
def train_step(coder = flatcoder, mom = mom, gram = gram):
    """Train on one freshly generated (truncated) word.

    Returns (word, summed absolute error, number of outputs whose
    absolute error is >= 0.5).  NOTE: the second element is a total,
    not an average, despite the original variable name.
    """
    word = gram.generateWord()[:TRUNKATELENGTH]
    errors = train(word, coder, mom)

    # An output counts as correctly classified when |error| < 0.5.
    hits = sum(1 if math.fabs(e) < 0.5 else 0 for e in errors)
    total_abs_err = sum(math.fabs(e) for e in errors)

    return (word, total_abs_err, len(errors) - hits)


def runsteps(n, coder = flatcoder, mom = mom, gram = gram):
    for i in xrange(n):
        print train_step(coder, mom, gram)

def store_coder(coder = flatcoder, picklefilename = "flatcoder.pickle"):
    """Pickle `coder` to `picklefilename`.

    Fixes two defects in the original: the `picklefilename` parameter was
    ignored (the path was hard-coded to "flatcoder.pickle"), and the file
    was opened in text mode 'w', which corrupts binary pickle data on
    newline-translating platforms.  The default path is unchanged, so
    existing `store_coder()` calls behave as before.
    """
    with open(picklefilename, 'wb') as picklefile:
        pickle.dump(coder, picklefile)

# Module-level side effects: importing this file immediately trains for
# 100 steps and overwrites flatcoder.pickle in the working directory.
# NOTE(review): consider an `if __name__ == "__main__":` guard if this
# module is ever imported rather than run as a script.
runsteps(100)

store_coder()
