import free_coding
import process_efw
import cantor
import sys
import target_coding


# --- Cantor-coding training configuration ----------------------------------
# Hyper-parameters handed to cantor.setup_directions below; semantics are
# defined in the cantor module (values here look hand-tuned -- TODO confirm).
OFFSET = 0.9
DEFAULT = 0.9
FILENAME =  "my_efw"  # corpus file read at import time by process_efw.process_file
CODESIZE = 50  # length of the encoding vectors (see DEFAULT_ENCODING below)

# Symbol inventory for the corpus: '#' plus lowercase a-z plus a few
# word-internal punctuation characters ('-', apostrophe, '.').
alphabet = ['#','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','-', "'", '.']


# One "direction" per alphabet symbol; dim is the dimensionality the nets
# built below operate in (exact structure defined in the cantor module).
(directions, dim) = cantor.setup_directions(alphabet, offset = OFFSET, default = DEFAULT)

# Seed encoding: CODESIZE copies of the first component of the first direction.
free_coding.DEFAULT_ENCODING = [directions[0][0] for i in xrange(CODESIZE)]
free_coding.BACKPROP_DEPTH = 5

#directions[0] = [directions[0][0] for i in xrange(CODESIZE)] # NOTE(review): dead, commented-out experiment; would overwrite the first direction with CODESIZE copies of its own first component -- intent unclear, left disabled.
gram = process_efw.WordCorpusGram(process_efw.process_file(FILENAME), alphabet)
#nets_params = free_coding.setup_big_nets(dim, CODESIZE, [75,150,120,75], [60,150,100,120]) + (directions,)
# Current network configuration: a single hidden layer of 200 units on each
# side; the commented line above is an earlier, deeper configuration kept
# for reference.
nets_params = free_coding.setup_big_nets(dim, CODESIZE, [200], [200]) + (directions,)

#print free_coding.train(5, gram, *nets_params)

# Convenience wrappers for interactive sessions: run n training steps and
# return whatever train() returns.  The keyword-before-*args ordering is
# legal Python 2 call syntax: *nets_params extend the positional arguments
# and learnrate/verbose are still passed by keyword.
steps = lambda n: free_coding.train(n, gram, learnrate = 0.02, *nets_params)
vsteps = lambda n: free_coding.train(n, gram, learnrate = 0.02, verbose = True, *nets_params)

t_steps = lambda n: target_coding.train(n, gram, learnrate = 0.02, *nets_params)
t_vsteps = lambda n: target_coding.train(n, gram, learnrate = 0.02, verbose = True, *nets_params)


def epochs(epochs, n, out = sys.stdout):
    """Run `epochs` rounds of free-coding training, `n` steps per round.

    Each round calls steps(n) and prints its return value on its own
    line to `out` (default: sys.stdout).
    """
    for _round in xrange(epochs):
        print >> out, steps(n)

def tepochs(epochs, n, out = sys.stdout):
    """Run `epochs` rounds of target-coding training, `n` steps per round.

    Mirrors epochs() but drives t_steps(n) (target_coding.train) instead;
    each round's result is printed on its own line to `out`.
    """
    for _round in xrange(epochs):
        print >> out, t_steps(n)


