import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import tensorflow as tf
from sklearn.decomposition import PCA
import os
import timeit

# --- Configuration ---------------------------------------------------------
# Side length (pixels) of each square input patch.
PATCH_SIZE = 32
#Number of total patches
PATCH_COUNT = 10000
#Number of samples to process in a single batch
BATCH_SIZE = 500
# Number of randomly drawn rows used to fit the PCA basis.
PCA_BATCH_SIZE = 2400
#Number of PCA components
PCA_COMP_COUNT = 350
OUT_FOLDER = 'imnet_inference/'

start = timeit.default_timer()

# exist_ok=True avoids the check-then-create race of exists() + makedirs().
os.makedirs(OUT_FOLDER, exist_ok=True)

# Gabor filter bank; reshaped to conv2d kernel layout later in the pipeline.
filters = np.load('gabors.npy').astype(np.float32)

# Inputs and outputs are memory-mapped so the full dataset never has to fit
# in RAM at once.
samples = np.memmap('patchessmall', dtype='float32', mode='r', shape = (PATCH_COUNT, PATCH_SIZE, PATCH_SIZE, 1))
# Trailing axes presumably (3 scales, 12 orientations, 2 quadrature phases)
# per the reshapes below -- TODO confirm axis semantics.
v1SimpleFile = np.memmap(OUT_FOLDER + 'v1Simple', dtype='float32', mode='w+', shape = (PATCH_COUNT, 11, 11, 3, 12, 2))
v1ComplexFile = np.memmap(OUT_FOLDER + 'v1ComplexSubMean', dtype='float32', mode='w+', shape = (PATCH_COUNT, 4356))
anglesFile = np.memmap(OUT_FOLDER + 'angles', dtype='float32', mode='w+', shape = (PATCH_COUNT, 11, 11, 3, 12, 1))

# Floor division: any remainder patches (PATCH_COUNT % BATCH_SIZE) are skipped.
loopCount = PATCH_COUNT // BATCH_SIZE

# Reshape the Gabor bank to conv2d kernel layout (kh, kw, in_ch, out_ch) and
# convert it to a tensor ONCE -- both operations are loop-invariant and were
# previously re-executed on every batch.
filters = np.reshape(filters, (12, 12, 1, -1))
filtersTensor = tf.convert_to_tensor(filters)

with tf.device('/gpu:0'):
	for i in range(loopCount):

		bStart = i * BATCH_SIZE
		bEnd = bStart + BATCH_SIZE

		# One batch of patches, shape (BATCH_SIZE, PATCH_SIZE, PATCH_SIZE, 1).
		batch = samples[bStart:bEnd, :, :, :]

		# V1 simple-cell responses: stride-3 'SAME' convolution with the bank.
		v1Responses = tf.nn.conv2d(batch, filtersTensor, [1, 3, 3, 1], 'SAME')

		# Unpack the filter axis; trailing axes assumed to be
		# (3 scales, 12 orientations, 2 quadrature phases) -- TODO confirm.
		v1Responses = tf.reshape(v1Responses, [BATCH_SIZE, -1, 3, 12, 2])

		# Recover the square spatial grid (11x11 for 32px patches at stride 3).
		spatialDims = int(np.sqrt(v1Responses.shape[1]))

		v1Responses = tf.reshape(v1Responses, [BATCH_SIZE, spatialDims, spatialDims, 3, 12, 2])

		# Phase angle from the quadrature pair.
		pair0, pair1 = tf.split(v1Responses, num_or_size_splits = 2, axis = -1)
		angles = tf.atan2(pair1, pair0)

		# Complex-cell energy: L2 norm over the quadrature-phase axis.
		v1CResponses = tf.sqrt(tf.reduce_sum(tf.square(v1Responses), axis = -1))
		v1CResponses = tf.reshape(v1CResponses, [BATCH_SIZE, -1])

		v1SimpleFile[bStart:bEnd, :] = v1Responses[:]
		v1ComplexFile[bStart:bEnd, :] = v1CResponses[:]
		anglesFile[bStart:bEnd, :, :, :, :, :] = angles[:]

# Per-sample DC removal: axis=1 averages over the FEATURE axis, i.e. each
# patch's mean complex response is subtracted from that patch's row.
v1ComplexMean = np.mean(v1ComplexFile, axis = 1, keepdims = True)

v1ComplexFile -= v1ComplexMean

# Random subset of rows (sampled with replacement) used to fit the PCA basis.
v1ComplexBatch = np.memmap(OUT_FOLDER + 'forpca', dtype='float32', mode='w+', shape = (PCA_BATCH_SIZE, v1ComplexFile.shape[1]))

# Vectorized draw replaces the previous element-by-element copy loop.
randIndices = np.random.randint(0, PATCH_COUNT, size = PCA_BATCH_SIZE)
v1ComplexBatch[:] = v1ComplexFile[randIndices, :]

# Per-row mean removal again; rows are already ~zero-mean after the
# subtraction above, so this is effectively a no-op (kept for parity).
# NOTE(review): standard PCA centering would be per-feature (axis=0) --
# confirm the per-sample centering here is intentional.
v1ComplexBatch -= np.mean(v1ComplexBatch, axis = 1, keepdims = True)

# Fit a whitening PCA basis via an eigendecomposition of the feature
# covariance matrix of the sampled batch.
cov = np.cov(v1ComplexBatch.T)

# Use eigh, not eig: the covariance matrix is symmetric, so eigh is faster,
# numerically stabler, and guaranteed to return real output. Crucially,
# np.linalg.eig returns eigenvalues in NO particular order (and eigh returns
# them ascending), so slicing eigVals[:PCA_COMP_COUNT] without sorting did
# NOT select the top components and could divide by near-zero eigenvalues
# in the whitening step below. Sort descending explicitly.
eigVals, eigVecs = np.linalg.eigh(cov)
order = np.argsort(eigVals)[::-1]
eigVals = eigVals[order]
eigVecs = eigVecs[:, order].T  # rows are eigenvectors, strongest first

pcaTransformed = np.memmap(OUT_FOLDER + 'pcaTransformed', dtype='float32', mode='w+', shape = (PATCH_COUNT, PCA_COMP_COUNT))

# Whitening projection: each retained component scaled by 1/sqrt(eigenvalue).
pcaForward = eigVecs[:PCA_COMP_COUNT] * (1.0 / np.sqrt(eigVals[:PCA_COMP_COUNT])[:, np.newaxis])

# Project every batch of complex responses into the whitened PCA space.
for batchIdx in range(loopCount):
	lo = batchIdx * BATCH_SIZE
	hi = lo + BATCH_SIZE
	pcaTransformed[lo:hi] = v1ComplexFile[lo:hi].dot(pcaForward.T)

# Persist the basis, the forward/inverse whitening maps, the projected
# data, and the per-sample means.
np.save(OUT_FOLDER + 'eigvals.npy', eigVals)
np.save(OUT_FOLDER + 'eigvecs.npy', eigVecs)
np.save(OUT_FOLDER + 'forwardPCA.npy', pcaForward)
# Inverse map: components scaled back up by sqrt(eigenvalue).
np.save(OUT_FOLDER + 'inversePCA.npy', eigVecs[:PCA_COMP_COUNT] * np.sqrt(eigVals[:PCA_COMP_COUNT])[:, np.newaxis])
np.save(OUT_FOLDER + 'pcaTransformed.npy', pcaTransformed)
np.save(OUT_FOLDER + 'v1cmean.npy', v1ComplexMean)

stop = timeit.default_timer()

print('Time: ', stop - start)
