import numpy as np
import array
import os
import glob
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
import sys
import os

from layers import Dense
from utils import *

# --- Run configuration ---------------------------------------------------
PATCH_COUNT = 400000       # number of training patches (used in output name)
COMPONENT_COUNT = 100      # leading PCA components kept from the data
n_units = 800              # number of ICA filters / hidden units to learn
OUT_FOLDER = 'icabasis/'

tf.compat.v1.disable_eager_execution()

# Make sure the output directory exists before training starts.
os.makedirs(OUT_FOLDER, exist_ok=True)

# Output path encodes the full run configuration.
fileName = f'{OUT_FOLDER}basisicacomps{COMPONENT_COUNT}codes{n_units}patches{PATCH_COUNT}.npy'

print(fileName)

# PCA-projected image data; keep only the leading COMPONENT_COUNT columns.
data = np.load('imnet/pcaTransformed.npy')[:, :COMPONENT_COUNT]

n_samples, patch_size = data.shape[:2]

# Alternative dataset (kept for reference):
#data = np.load('vanhateren_patches.npy')
#n_samples = data.shape[0]
#patch_size = data.shape[1]

# The estimator does not require whitened input, so the explicit ZCA
# whitening step below is left disabled.
WHITEN_DATA = True
n_comp = None

# Flatten each sample, then standardize every feature in-place to
# zero mean / unit standard deviation across the dataset.
data_flat = np.reshape(data, [data.shape[0], -1])
data_flat -= np.mean(data_flat, axis=0, keepdims=True)
data_flat /= np.std(data_flat, axis=0, keepdims=True)

# Explicit ZCA whitening (disabled — see WHITEN_DATA note above):
#if WHITEN_DATA is True:
#    data_cov = np.cov(data_flat.transpose())
#    Q = ZCAMatrix(data_cov)
#    data_flat = np.dot(data_flat, Q)

def G_fun(Z):
    """Log-density nonlinearity G(z) = -log cosh(z), evaluated stably.

    Mathematically identical to ``-tf.log(tf.cosh(Z))`` via the identity
    log cosh(z) = |z| + log1p(exp(-2|z|)) - log 2, but does not overflow:
    ``tf.cosh`` saturates to inf in float32 already near |z| ~ 89.
    """
    abs_Z = tf.abs(Z)
    return -(abs_Z + tf.log1p(tf.exp(-2.0 * abs_Z)) - np.log(2.0))

def g_fun(Z):
    """First derivative of G: g(z) = dG/dz = -tanh(z)."""
    return tf.negative(tf.tanh(Z))

def gprime_fun(Z):
    """Second derivative of G: g'(z) = tanh(z)^2 - 1 = -sech(z)^2."""
    tanh_Z = tf.tanh(Z)
    return tanh_Z * tanh_Z - 1.0

# Alternative nonlinearity G(z) = log(1 + z^2) and its first/second
# derivatives (kept for experimentation; currently disabled):
#def G_fun(Z):
#    return tf.log(1.0 + tf.square(Z))

#def g_fun(Z):
#    return (2.0 * Z) / (1.0 + tf.square(Z))

#def gprime_fun(Z):
#    return 2 * (1.0 - tf.square(Z)) / tf.square(1.0 + tf.square(Z)) 

## Some basic things about data in tensorflow
# Dimensionality of one flattened, standardized sample.
n_dim = data_flat.shape[1]

tf.reset_default_graph()
# Mini-batches of flattened samples are fed through this placeholder.
X = tf.placeholder(shape=[None, n_dim], dtype=tf.float32)
batch_size = 500

## Define parameter tensors 
# One filter per hidden unit, stored as the rows of W.
W_shape = (n_units, n_dim)

# filter matrix W*x or X*W'in matrix form where the rows of X are data points
# Constraint callback: rows of W are re-projected to unit L2 norm after
# every optimizer update (applied by the Variable's `constraint`).
l2_normalize = lambda w : tf.nn.l2_normalize(w, axis=1)
#with tf.device('/gpu:0'):
W = tf.Variable(tf.nn.l2_normalize(tf.random_normal(W_shape), axis=1), dtype=tf.float32, constraint=l2_normalize)
# Per-unit scale; tf.abs as constraint keeps it non-negative after updates.
alpha = tf.Variable(tf.ones(n_units), dtype=tf.float32, constraint=tf.abs) # be careful with constraint
#    alpha = tf.constant(np.ones(n_units), dtype=tf.float32)


# Z[i, j] = <x_i, w_j>: activation of unit j for sample i in the batch.
Z = tf.matmul(X, W, transpose_b=True)
G = G_fun(Z)   # NOTE(review): computed but not used in the cost below
g = g_fun(Z)
gg = gprime_fun(Z)  
# Batch mean of g'(z) per unit, shape (1, n_units).
gg_mean = tf.reduce_mean(gg, axis=0, keepdims=True)
# First term: sum_j alpha_j * E[g'(z_j)] * ||w_j||^2.  Together with
# J_second this looks like a score-matching objective for an ICA-style
# model (Hyvarinen) — TODO confirm against the derivation this follows.
J_first = tf.reduce_sum(tf.matmul(gg_mean, tf.square(W) * alpha[:, None]))
#J_second = tf.reduce_mean(tf.reduce_sum(tf.square(tf.matmul(g, W * alpha[:,None])), axis=1)) / 2.0
# Second term, rewritten via Gram matrices: WW is the Gram matrix of the
# alpha-scaled filters, GG the (batch-averaged) Gram matrix of the scores.
# sum(WW * GG) / 2 equals the commented per-sample expression above.
Walpha = W * alpha[:, None]
WW = tf.matmul(Walpha, Walpha, transpose_b=True)
GG = tf.matmul(g, g, transpose_a=True) / batch_size
J_second = tf.reduce_sum(WW * GG) / 2.0
# Total objective minimized w.r.t. W and alpha.
J = (J_first + J_second)

def normalize_W(A):
    """Return a copy of A with each row rescaled to unit Euclidean norm."""
    row_norms = np.sqrt(np.sum(A * A, axis=1, keepdims=True))
    return A / row_norms

# Adam optimizer; the variables' `constraint` callbacks (row-normalize W,
# clamp alpha non-negative) are applied after every update.
ica_trainer = tf.train.AdamOptimizer(learning_rate=0.02)

ica_step = ica_trainer.minimize(J)

# Mini-batch sampler over the standardized data (see utils.Data).
patch_data = Data(data_flat, batch_size=batch_size)
n_epoch = 16
n_iter = n_epoch * 800

# Per-iteration training curves: total cost and its two terms.
train_cost = np.zeros((n_iter,))
train_J1_cost = np.zeros((n_iter,))
train_J2_cost = np.zeros((n_iter,))

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# NOTE(review): this initial draw advances the sampler state; it is kept
# so the sequence of training batches matches the original script.
data_train = patch_data.getBatch(batch_size)

for iTr in range(n_iter):
    data_train = patch_data.getBatch(batch_size)

    # Fetch the costs and apply the optimizer step in ONE session run.
    # The original evaluated J, J_first and J_second in three separate
    # extra runs per iteration; fetching them alongside ica_step yields
    # the same pre-update values at a quarter of the graph executions.
    train_cost[iTr], train_J1_cost[iTr], train_J2_cost[iTr], _ = sess.run(
        [J, J_first, J_second, ica_step], feed_dict={X: data_train})

    # Log 10 times over the run, plus the very first iteration.
    # `//` keeps the modulus an int (the original `/` made it a float).
    if (iTr + 1) % (n_iter // 10) == 0 or iTr == 0:
        print("iter: %06d; cost:%f"%(iTr, train_cost[iTr]))
        print("iter: %06d; J1 cost:%f"%(iTr, train_J1_cost[iTr]))
        print("iter: %06d; J2 cost:%f"%(iTr, train_J2_cost[iTr]))

# Save the learned filter matrix (rows are the unit-norm ICA filters).
basis = W.eval()
np.save(fileName, basis)
