import cantor
import feedforward
import process_efw
import helpers
import random
import backprop
import math

# --- Module-level tuning constants ---
STAND_SLOPE = 1.0   # NOTE(review): not referenced anywhere in this chunk
SMALLWEIGHT = 1.0   # diagonal weight for the code-copy half of decoder layer 1
#SCALE = .03124
SCALE = .001        # contraction applied to direction vectors when encoding
# Enormous diagonal weight for the symbol half of decoder layer 1 --
# presumably meant to saturate the transfer function so the symbol part acts
# as a hard sign read-out of the code (confirm against feedforward.propagate).
BIGWEIGHT = 1/SCALE * 99999999999999999999999999999999999999999999999999

#SCALE = .5
alphabet = process_efw.alphabet   # symbol set shared with the grammar module

gram = process_efw.load_grammar() # grammar used to generate training words

class encoder_c(object):
    """Recursive fractal-style sequence encoder.

    The code of a sequence is built right-to-left:
        code(s) = transfer(0.5 * code(s[1:]) + scale * direction(s[0]))
    so each symbol contracts the tail's code by 0.5 and shifts it along
    that symbol's direction vector.
    """

    def __init__(self, dim, directions, alphabet = alphabet, transfer = math.tanh, scale = SCALE):
        # dim: dimensionality of the direction vectors (stored for reference;
        # the original discarded it).
        self.dim = dim
        self.alphabet = alphabet
        self.dirs = directions
        self.transfer = transfer
        self.scale = scale

    def encode(self, seq):
        """Return the code (list of floats, length dim) of non-empty `seq`."""
        # Bug fix: index with self.alphabet -- the original used the
        # module-level `alphabet`, silently ignoring a custom alphabet
        # passed to __init__.
        d = [self.scale * di for di in self.dirs[self.alphabet.index(seq[0])]]
        if len(seq) == 1:
            return d
        # Encode the tail, then contract and shift by this symbol's direction.
        c = self.encode(seq[1:])
        return [self.transfer(0.5 * c[i] + d[i]) for i in range(len(d))]

# todo: apply backprop

def setup_decoder(dim, directions, bigweight = BIGWEIGHT,
                  smallweight = SMALLWEIGHT, randizer = lambda: 0.0):
    """Build the initial two-layer decoder weight matrices.

    Layer 1 stacks two row groups over the dim-wide input code:
      * sign rows: bias + huge diagonal weight (saturating read-out),
      * copy rows: bias + small diagonal weight (pass the code through).
    Layer 2 maps those 2*dim activations to a symbol half and a code half.
    `randizer` perturbs every weight; the default adds no noise.  The
    comprehension structure mirrors the original so randizer() is called
    the same number of times in the same order.
    """
    # First layer, symbol read-out half.
    sign_rows = [[randizer()]
                 + [bigweight + randizer() if col == row else 0.0
                    for col in range(dim)]
                 for row in range(dim)]
    # First layer, code-copy half.
    copy_rows = [[randizer()]
                 + [randizer() + smallweight if col == row else 0.0
                    for col in range(dim)]
                 for row in range(dim)]
    first_layer = sign_rows + copy_rows

    # Second layer, symbol half: 0.5 on the sign part, noise elsewhere.
    symbol_rows = [[randizer()]
                   + [randizer() + 0.5 if col == row else randizer()
                      for col in range(dim)]
                   + [randizer() for col in range(dim)]
                   for row in range(dim)]
    # Second layer, code half: -SCALE on the sign part, 2.0 on the copy part.
    code_rows = [[randizer()]
                 + [randizer() + -0.5 * SCALE * 2.0 if col == row else randizer()
                    for col in range(dim)]
                 + [randizer() + 2.0 if col == row else randizer()
                    for col in range(dim)]
                 for row in range(dim)]
    second_layer = symbol_rows + code_rows

    return [first_layer, second_layer]


def apply_decoder(code, decoder):
    """Run `code` through the decoder network and split the output.

    Returns (outsymb, outcode, pre_acts, post_acts): the first half of the
    network output is the predicted symbol direction, the second half the
    code of the remaining sequence, plus the per-layer activations needed
    for backprop.
    """
    (output, pre_acts, post_acts) = feedforward.propagate(code, decoder)
    # Fix: use explicit floor division -- `/` only floors under Python 2;
    # `//` is identical there and stays an int index under Python 3.
    half = len(output) // 2
    outsymb = output[:half]
    outcode = output[half:]
    return (outsymb, outcode, pre_acts, post_acts)

def decode(cantor_code, decoder):
    """Alias for apply_decoder: one decoding step on a cantor code."""
    return apply_decoder(cantor_code, decoder)

# Direction vectors for each alphabet symbol, from the cantor helper.
(dirs, dim) = cantor.setup_directions(alphabet,.5, .5, 0.0)
#dim = len(alphabet)
#dirs = [[0.5 if dir == a else -0.5 for dir in alphabet] for a in alphabet]
# Plain cantor sequence coding; NOTE: ccode is rebound to robust_encode
# further down in this file, so this binding is shadowed.
ccode = lambda sequence: cantor.cantor_coding(sequence, alphabet, dirs, False, dim = dim)[0]

# Factory for a freshly initialized decoder network.
dsetup = lambda: setup_decoder(dim, dirs)

decoder = dsetup()              # module-level decoder, trained in place
encoder = encoder_c(dim, dirs)  # recursive encoder over the same directions


def artanh(x):
    """Inverse hyperbolic tangent with a fabs guard on the log argument.

    For |x| < 1 this equals math.atanh(x).  NOTE(review): the fabs only
    changes results for |x| >= 1 (where atanh is undefined) -- presumably a
    guard against saturated codes; confirm before replacing with math.atanh.
    (Was a lambda assigned to a name; a def is the idiomatic form.)
    """
    return math.log(math.fabs((x + 1) / (1 - x))) / 2.0


def backcode(x, d):
    """Get inverse of decoding: the code whose decode step yields x given
    shift d."""
    return artanh(.5 * (2.0 * d + artanh(x)))


def robust_encode(sequence, directions, alphabet, scale = None):
    """Encode `sequence` right-to-left by inverting the decoder step.

    The empty sequence encodes to the origin; each symbol prepends its
    scaled direction via backcode.  `scale` defaults to the module-level
    SCALE, resolved at call time.

    Fix: `scale` is now threaded through the recursion -- previously a
    non-default scale was applied to the first symbol only, with the rest
    of the sequence silently encoded at the default SCALE.
    """
    if scale is None:
        scale = SCALE
    if len(sequence) == 0:
        return [0.0 for d in directions[0]]
    direction = [scale * d for d in directions[alphabet.index(sequence[0])]]
    code = robust_encode(sequence[1:], directions, alphabet, scale)
    return [backcode(code[i], direction[i]) for i in range(len(direction))]

# Rebind ccode: robust (decoder-inverting) coding replaces the plain
# cantor coding defined earlier in the file.
ccode = lambda seq: robust_encode(seq, dirs, alphabet)

def code_compare(sequence, seq_code = None, verbose = True, learn = False):
    """Run one decode step on the code of `sequence` and compare the output
    against the true head symbol and the code of the tail; when `learn` is
    set, apply one backprop update to the module-level decoder (mutating
    the global `momentum` as well).

    Returns (predicted_letter, output_code, step_error).
    """
    if seq_code is None:
        seq_code= ccode(sequence)
    # Targets: code of the remaining sequence and direction of its head.
    target_seq = ccode(sequence[1:])
    target_dir = dirs[alphabet.index(sequence[0])]
    (outdir, outseq, preact, postact) = decode(seq_code, decoder)
    # Nearest-direction classification of the symbol half of the output.
    outletter = helpers.pick_letter(alphabet, dirs, outdir)
    if verbose:
        print "-------------------------------------------"
        print "seq:  " + str(sequence)
        print "code: " + str(seq_code)
        print "-------------------------------------------"
        print sequence[0] + " " + str(target_dir)
        print outletter + " " + str(outdir)
        print "-------------------------------------------"
        print "targ:   " + str(target_seq)
        print "out : " + str(outseq)
        print "-------------------------------------------"
        for pl in preact:
            print ",".join([str(p)[:6] for p in pl])
        print "-------------------------------------------"

    steperr = 0.0

    if learn:
        global momentum
        dir_err = [target_dir[i] - outdir[i] for i in xrange(len(outdir))]
        seq_err = [target_seq[i] - outseq[i] for i in xrange(len(outseq))]
        targ_err = dir_err+seq_err
        # NOTE(review): signed error components are summed directly, so
        # positive and negative terms cancel in steperr -- confirm a
        # magnitude (abs/squared) sum was not intended.
        for t in targ_err:
            steperr += t
        (momentum, deltas) = backprop.backprop(decoder, targ_err, preact,\
                 postact, momentum, SCALE * 0.001, SCALE * 0.001, backprop.dtanh)
        if verbose:
            print "-------------------------------------------"
            print "err: " + str(targ_err)
            print "-------------------------------------------"

    return (outletter, outseq, steperr)

momentum = [[[0.0 for w in n] for n in l] for l in decoder]


def performance(sequence, seq_code = None, verbose = False, learn = False):
    """Predict `sequence` symbol by symbol, chaining each step's output
    code into the next step.  Returns the predicted string, plus the
    accumulated step error when `learn` is set.
    """
    predicted = []
    total_err = 0.0
    for start in range(len(sequence)):
        # code_compare returns the next symbol and the code for the tail.
        (symb, seq_code, step_err) = code_compare(sequence[start:],
                                                  seq_code, verbose, learn)
        predicted.append(symb)
        total_err += step_err
    outseq = "".join(predicted)
    return (outseq, total_err) if learn else outseq


def enc_perf(seq, verbose = False, encoder = encoder):
    """Evaluate performance starting from the tanh encoder's code of seq."""
    initial_code = encoder.encode(seq)
    return performance(seq, initial_code, verbose)

def train(n, verbose = False):
    """Train the module-level decoder on n grammar-generated words,
    printing word, prediction and error for each."""
    for _ in range(n):
        word = gram.generateWord()
        (outword, error) = performance(word, verbose = verbose, learn = True)
        print(word + " | " + outword + " | " + str(error))

def enctrain(n, encoder = encoder, verbose = False):
    """Like train(), but seed each word with the tanh encoder's code
    instead of the default ccode."""
    for _ in range(n):
        word = gram.generateWord()
        outword, error = performance(word, encoder.encode(word), verbose, True)
        print(word + " | " + outword + " | " + str(error))



def test_generated(l, ngrams, perf = performance):
    """Generate an ngram string of length l, predict it with `perf`, and
    return the number of mismatched positions (position 0 is skipped:
    the first symbol is read straight off the code)."""
    inseq = process_efw.generate_ngram_string(l,ngrams)
    outseq = perf(inseq)
    return sum(1 for pos in range(1, len(inseq)) if inseq[pos] != outseq[pos])

def runtest(maxsize, times, perf = performance):
    """Print the mean mismatch count over `times` generated strings for
    every length from 0 to maxsize inclusive."""
    ngrams = process_efw.load_ngrams()
    for size in range(maxsize + 1):
        total = 0.0
        for _ in range(times):
            total += test_generated(size, ngrams, perf)
        print(total / times)

# Script entry point: measure encoder+decoder prediction accuracy on
# generated strings up to length 60, 30 samples per length.
if __name__ == "__main__":
    runtest(60,30, perf = enc_perf)
