import feedforward
import net
import cantor
import process_efw
import backprop
import math
import eval_coding
import character_model
import charcoding

#FILENAME =  "my_efw"
HIDLAYERS = []      # hidden-layer sizes for the predictor net (empty = no hidden layer)
LEARNRATE = 0.08    # backprop learning rate
MOMRATE = 0.1       # backprop momentum rate


# Character inventory: '#' is used as the word-start padding symbol (see the
# training/apply routines, which prepend "#"*WINDOW_SIZE to each word).
alphabet = ['#','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','-', "'", '.']

# NOTE(review): `model` presumably maps WINDOW_SIZE-character windows to
# target distributions over `alphabet` (see train_grams) -- confirm against
# character_model.load_model().
(gramsize, model) = character_model.load_model()
WINDOW_SIZE = gramsize - 1   # an n-gram predicts its last char from the first n-1

#(directions, dim) = cantor.setup_directions(alphabet, offset = OFFSET, default = DEFAULT)

# One net input/output unit per alphabet symbol (`l` is a chained alias of `dim`).
dim = l = len(alphabet)
#directions = [[OFFSET if i == j else DEFAULT for i in xrange(l)] for j in xrange(l)]



# predictor: the feedforward net mapping WINDOW_SIZE encoded characters to a
# letter distribution; mom: a same-shaped net holding the momentum state,
# rebound by the training routines.
predictor = feedforward.generate_net([dim*WINDOW_SIZE]+HIDLAYERS+[dim], net.gaussianGen)
mom = feedforward.generate_net([dim*WINDOW_SIZE]+HIDLAYERS+[dim], lambda: 0.0)

#gram = process_efw.WordCorpusGram(process_efw.process_file(FILENAME), alphabet)

#charcoders = charcoding.gen_encoders()


def maxindex(output):
    """Return the index of the largest element of *output*.

    Ties go to the earliest index; an empty sequence yields 0 (matching the
    original behavior).
    """
    # `best` instead of the original `max`, which shadowed the builtin.
    best = 0
    for i, value in enumerate(output):
        if value > output[best]:
            best = i
    return best

def dirty_string_reverse(string):
    """Return *string* with its characters in reverse order."""
    # A reverse slice replaces the original list round-trip
    # ([s for s in string]; reverse(); join()) -- identical result.
    return string[::-1]

def apply(word = None):
    """Run the trained predictor over *word* and collect its predictions.

    For every WINDOW_SIZE-character window the predictor emits a distribution
    over `alphabet`; the argmax letter is appended to the output sequence.
    Returns "<word> | <predicted letters>".

    When *word* is None a word is drawn from `gram` -- NOTE(review): `gram`
    is only assigned in a commented-out line at module level, so the default
    path raises NameError as the module stands.

    NOTE: the name shadows the Python 2 builtin `apply`; kept unchanged for
    existing callers.
    """
    if word is None:
        word = "#" * WINDOW_SIZE + gram.generateWord()
    outsequence = []
    for n in xrange(len(word)-WINDOW_SIZE):
        # Bug fix: rebuild n_input for every window position.  It was
        # previously initialized once before this loop, so the inputs
        # accumulated and grew past the dim*WINDOW_SIZE units the net expects.
        n_input = []
        # Feed the window's characters in reverse order, matching the
        # encoding used by train_grams().  (The old code reversed a single
        # character -- a no-op -- so the ordering disagreed with training.)
        window = dirty_string_reverse(word[n:n+WINDOW_SIZE])
        for i in xrange(WINDOW_SIZE):
            (nin, preact, postact) = charcoding.prop_coder(window[i], net.sigmoid)
            n_input.extend(nin)

        # Pass net.sigmoid explicitly, as both training routines do.
        (output, preact, postact) = feedforward.propagate(n_input, predictor,\
                                                          net.sigmoid)
#        outletter = eval_coding.pick_letter(alphabet, directions, output)
        outletter = alphabet[maxindex(output)]
        outsequence.append(outletter)

    return word + " | " + "".join(outsequence)


def smallstr(sequence, size = 4):
    """Render *sequence* as "[...]" with each element abbreviated to its
    first and last *size* characters (for compact debug printing)."""
    pieces = []
    for item in sequence:
        text = str(item)
        pieces.append(text[:size] + text[-size:])
    return "[" + ",".join(pieces) + "]"

def train_grams(n):
    """Train `predictor` and the per-character coders for *n* passes over `model`.

    Each entry of `model` maps a WINDOW_SIZE-character window to a target
    distribution over `alphabet`.  For every window the characters are
    encoded in reverse order, propagated through `predictor`, and the error
    against the target distribution is backpropagated through both the
    predictor and the character coders.  Prints the summed absolute error
    per window.  Rebinds the module-global `mom` (momentum state).
    """
    global mom
    for i in xrange(n):
        for window in model:
            n_input = []   # concatenated coder outputs for the whole window
            wpostact = [None for i in xrange(WINDOW_SIZE)]  # per-char coder activations
            distribution = model[window]   # target letter distribution

            # Characters are presented to the net in reverse order.
            window = dirty_string_reverse(window)
            for w in xrange(WINDOW_SIZE):
                (win, preact, wpostact[w]) = charcoding.prop_coder(window[w], net.sigmoid)
                n_input.extend(win)

            (output, preact, postact) = feedforward.propagate(n_input, predictor,\
                                                              net.sigmoid)

            # Componentwise error: target - output.
            errors = [distribution[i] - output[i] for i in xrange(len(output))]

            # Prepend the coders' final activations as the input-layer
            # activations so backprop also yields deltas for them.
            bp_postact = []
            for i in xrange(WINDOW_SIZE):
                bp_postact += wpostact[i][-1]
            postact = [bp_postact] + postact
            (mom, deltas) = backprop.sqrt_backprop(predictor, errors, preact, postact,\
                                    mom, LEARNRATE, MOMRATE, backprop.dsigmoid)

            # Split the input-layer deltas back per character and train the
            # coders on their slice.  (Python 2 integer division.)
            wsize = len(n_input) / WINDOW_SIZE
            for i in xrange(WINDOW_SIZE):
                cdeltas = deltas[i*wsize:i*wsize+wsize]
                charcoding.bprop_coder(window[i], cdeltas, backprop.dsigmoid)


            # Report summed absolute error for this window.
            err = 0.0
            for e in errors:
                err += math.fabs(e)

            print err



def old_deprecated_and_wrong_train(n):
    """Doesn't get ordering of input-window in reverse-mode.

    Deprecated word-based trainer, superseded by train_grams(): it feeds the
    window's characters in forward order, while train_grams() reverses them.
    Kept for reference only.

    NOTE(review): relies on the module-global `gram`, which is only assigned
    in a commented-out line at module level -- calling this as the module
    stands raises NameError.  Rebinds the module-global `mom`.
    """
    global mom
    for i in xrange(n):
#        numseq = gram.generate()
#        numseq = [0,0] + numseq # opening window
#        sequence = [directions[num] for num in numseq]
        # Draw a training word and pad with the word-start symbol.
        word = gram.generateWord()
        word = "#"*WINDOW_SIZE + word


        seq_err = 0.0
        for n in xrange(len(word)-WINDOW_SIZE):   # NOTE: shadows the parameter `n`
            n_input = []   # concatenated coder outputs for this window
            wpostact = [None for i in xrange(WINDOW_SIZE)]  # per-char coder activations
            # Encode the window characters (forward order -- the known flaw).
            for w in xrange(WINDOW_SIZE):
                (win, preact, wpostact[w]) = charcoding.prop_coder(word[n+w], net.sigmoid)
                n_input.extend(win)

            (output, preact, postact) = feedforward.propagate(n_input, predictor, net.sigmoid)

            # Target distribution for this window; error = target - output.
            distribution = model[word[n:n+WINDOW_SIZE]]
            errors = [distribution[i] - output[i] for i in xrange(len(output))]
#            print "dist: " + smallstr(distribution)
#            print "outp: " + smallstr(output)
#            print "diff: " + smallstr(errors)

            # Prepend the coders' final activations as the input layer so
            # backprop also yields deltas for them.
            bp_postact = []
            for i in xrange(WINDOW_SIZE):
                bp_postact += wpostact[i][-1]
            postact = [bp_postact] + postact
            (mom, deltas) = backprop.sqrt_backprop(predictor, errors, preact, postact,\
                                    mom, LEARNRATE, MOMRATE, backprop.dsigmoid)

            # Train each character coder on its slice of the input deltas
            # (Python 2 integer division).
            wsize = len(n_input) / WINDOW_SIZE
            for i in xrange(WINDOW_SIZE):
                cdeltas = deltas[i*wsize:i*wsize+wsize]
                charcoding.bprop_coder(word[n+i], cdeltas, backprop.dsigmoid)


            # Accumulate mean absolute error per window.
            err = 0.0
            for e in errors:
                err += math.fabs(e)
            seq_err += err/len(errors)

        print str(seq_err / len(word)) + " | " + apply(word)



#train_grams(200)
