import random
from datetime import *

from core.models import Webcam, Image
import core.utils as utils
import numpy as np
import os.path
import os


import pylab
from PIL import Image
from PIL import ImageOps as ImOps

os.environ['MPLCONFIGDIR'] = 'tmp'

import scipy.stats as ss
from scipy.cluster.vq import kmeans, kmeans2, whiten
from scipy import io
import sift, ipca, finder, montage

def run(c=5, k=5, month1=1, month2=7, hour1=12, hour2=15):
	"""Cluster precomputed PCA coefficients of webcam images.

	Selects images from camera 929 taken between the 1st of month1 and
	the 1st of month2 (year 2012), restricted to the daily time window
	[hour1, hour2].  The precomputed coefficients for those images are
	reduced to c components via ksvd, whitened, and clustered with
	k-means into k clusters.

	c      -- number of PCA components to keep
	k      -- number of k-means clusters
	month1 -- start month (inclusive, day 1)
	month2 -- end month (exclusive, day 1)
	hour1  -- daily window start hour
	hour2  -- daily window end hour

	Returns (codebook, distortion) as produced by scipy's kmeans.
	"""
	cam = 929
	start_date = datetime(2012, int(month1), 1)
	end_date = datetime(2012, int(month2), 1)
	start_time = time(int(hour1), 0, 0)
	end_time = time(int(hour2), 0, 0)

	start_date = datetime.combine(start_date, time(0, 0))
	end_date = datetime.combine(end_date, time(0, 0))
	imgs = utils.images_in_time_rect(cam, start_date, end_date, start_time, end_time)
	# map image names to row indices of the precomputed coefficient matrix
	year = start_date.year
	year_names, coeffs = finder.get_data(cam, year)
	imgs_names = [image.generateImageName() for image in imgs]
	ids = [year_names.index(name) for name in imgs_names]

	# slice out the coefficient rows for the selected images
	subset = coeffs[ids]
	# reduce the subset to c components
	V, s, U = ipca.ksvd(subset, c)
	newSV = np.dot(np.diag(s), V.T)
	whitened = whiten(newSV.T)  # whitening is required for kmeans
	# k-means clustering on this subset
	result, distortion = kmeans(whitened, k)
	# BUG FIX: the original computed the clustering and silently discarded
	# it; return it so callers can actually use the result.
	return result, distortion
	

def runOLD(c=5, k=5, month1=1, month2=7, hour1=12, hour2=15, m="mean"):
	"""Cluster images by PCA coefficients and summarize each cluster.

	Same selection/clustering pipeline as run() (camera 929, 2012,
	month/hour window), then builds a per-cluster summary image: the
	pixelwise mean of the members when m == "mean", otherwise the
	pixelwise median (the median branch also shows a montage).

	Returns (results, clustered) where results is an (h, w, k) stack of
	summary images and clustered is a list of k lists of 2-D arrays.
	"""
	cam = 929
	start_date = datetime(2012,int(month1),1)
	end_date = datetime(2012,int(month2),1)
	start_time = time(int(hour1),0,0)
	end_time = time(int(hour2),0,0)

	start_date = datetime.combine(start_date,time(0,0))
	end_date = datetime.combine(end_date, time(0,0))
	imgs = utils.images_in_time_rect(cam, start_date, end_date, start_time, end_time)
	# we convert the names to indices into the precomputed coefficient matrix
	year = start_date.year
	year_names, coeffs = finder.get_data(cam, year)
	imgs_names = [image.generateImageName() for image in imgs]
	ids = [year_names.index(name) for name in imgs_names]

	# we need to slice out the indices we care about
	subset = coeffs[ids]
	# subset PCA using c components
	V,s,U = ipca.ksvd(subset, c)
	newSV = np.dot(np.diag(s), V.T)
	whitened = whiten(newSV.T) # whitening is required for kmeans
	# doing the kmeans clustering on this subset
	result = kmeans2(whitened, k)
	#return (imgs, imgs_names, result)

	# kmeans2 returns (centroids, labels); we only need the labels here
	labels = result[1]
	clustered = []
	imgs_array = []
	# reduce each image to a 2-D array; see convert() below
	for img in imgs:
		imgs_array.append(convert(img))
	# group the converted images by their cluster label
	for i in xrange(k):
		indices = np.where(labels==i)
		clustered.append([imgs_array[idx] for idx in indices[0]])

	# get image dimensions
	h,w = imgs_array[0].shape

	results = None
	# we need to flatten each image, compute mean of each cluster
	if m == "mean":
		cluster_means = np.zeros([h,w,k])
		for i in xrange(k):
			cluster = clustered[i]
			# one flattened image per column: (h*w, cluster size)
			cluster_array = np.zeros([h*w,len(cluster)])
			for j in xrange(len(cluster)):
				cluster_array[:,j] = np.array(cluster[j].flat)
			mean_flat = np.mean(cluster_array, axis=1)
			cluster_means[:,:,i] = mean_flat.reshape(h,w)
		results = cluster_means

	else:

	# median image instead

		cluster_medians = np.zeros([h,w,k])
		for i in xrange(k):
			cluster = clustered[i]
			# one flattened image per column: (h*w, cluster size)
			cluster_array = np.zeros([h*w,len(cluster)])
			for j in xrange(len(cluster)):
				cluster_array[:,j] = np.array(cluster[j].flat)
			median_flat = np.median(cluster_array, axis=1)
			cluster_medians[:,:,i] = median_flat.reshape(h,w)
		# NOTE(review): the montage is displayed only in the median
		# branch -- confirm this asymmetry with the mean branch is intended
		montage.montage(cluster_medians)
		results = cluster_medians

	# showing montage
	#montage.montage(results)
	print "Cluster sizes: %s" %([len(cluster) for cluster in clustered])
	#pylab.show()
	return (results, clustered)


def convert(img):
	"""Collapse an image to a single-channel 2-D array (per-pixel median).

	BUG FIX: the original guard ``isinstance(img, Image)`` always raised
	TypeError -- at module level ``Image`` is the PIL ``Image`` *module*
	(``from PIL import Image``), which also shadows ``core.models.Image``,
	and ``isinstance`` rejects a module as its second argument.  We
	duck-type on the ``load`` attribute instead, preserving the original
	"return None for non-images" intent.

	img -- object exposing load() that yields an (h, w, d) pixel array

	Returns an (h, w) numpy array of per-pixel channel medians, or None
	when img has no load() method.
	"""
	if not hasattr(img, 'load'):
		return None
	pImg = img.load()
	p_array = np.array(pImg)
	h, w, d = p_array.shape
	# flatten to (h*w, d), take the median across the d channels,
	# then restore the spatial shape
	p_flat = p_array.reshape(h * w, d)
	p_median = np.median(p_flat, axis=1)
	return p_median.reshape(h, w)

def getImagesBetween(imgs, first="20120101", second="20121212"):
	"""Return the images whose name-encoded date lies in [first, second].

	Image names begin with a YYYYMMDD date string; comparing the integer
	forms of those prefixes gives chronological ordering.  Both bounds
	are inclusive.
	"""
	lo = int(first)
	hi = int(second)
	return [img for img in imgs
		if lo <= int(img.generateImageName()[:8]) <= hi]

from urlparse import urlparse, parse_qsl
from urllib import *

class Url(object):
    '''A url object that can be compared with other url objects
    without regard to the vagaries of encoding, escaping, and ordering
    of parameters in query strings.'''

    def __init__(self, url):
        # Canonicalise: query params become an order-insensitive
        # frozenset and the path is unquoted, so equivalent URLs
        # compare (and hash) equal.
        parts = urlparse(url)
        _query = frozenset(parse_qsl(parts.query))
        _path = unquote_plus(parts.path)
        parts = parts._replace(query=_query, path=_path)
        self.parts = parts

    def __eq__(self, other):
        # BUG FIX: return NotImplemented for non-Url operands instead of
        # raising AttributeError on other.parts.
        if not isinstance(other, Url):
            return NotImplemented
        return self.parts == other.parts

    def __ne__(self, other):
        # BUG FIX: Python 2 does not derive __ne__ from __eq__, so the
        # original fell back to identity comparison for "!=".
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash(self.parts)


def do():

	test_path = 'static/mturk/test/'
	
	# reading in all images, then do some stuff with them?
	listing = os.listdir(test_path)
	pImages = []
	sifts = []
	
	for infile in listing:
		jpg_file = '%s%s'%(test_path,infile)
		stripped_file = jpg_file.strip('.jpg')
		pgm_file = '%s.pgm'%stripped_file
		key_file = '%s.key'%stripped_file
	   	im_gray = ImOps.grayscale(Image.open(jpg_file))
	   	pImages.append(im_gray)
	   	im_gray.save(pgm_file)
   		sift.process_image(pgm_file,key_file)
   		l,d = sift.read_features_from_file(key_file)
   		sifts.append(d)
   	
	total_sift_count = 0
	for each in sifts:
		total_sift_count += each.shape[0]
	sifts_mat = np.zeros([total_sift_count,128])
	cur = 0 # pointer to index of the start of current sift feature
	for each in sifts:
		sifts_mat[cur:(cur+(each.shape[0])),:] = each[:,:]
		cur += each.shape[0]
	# sifts_mat contains all sifts in column major
   	# now we do clustering based on normal distance
   	whitened = whiten(sifts_mat)
   	result = kmeans2(whitened, 100)
   	
   	labels = result[1]
   	centroids = result[0]
   	# labels are in a flat list, we need to generate a histogram for each image
	split_labels = []
	cur = 0
	for each in sifts:
		cur_labels = labels[cur:(cur+each.shape[0])]
		split_labels.append(cur_labels)
		cur += each.shape[0]
	 
	print len(split_labels)
   	
   	
	return pImages, sifts, result

# generates histogram from an array of labels
def gen_h(labels):
	"""Show a 99-bin histogram of the SIFT word labels of one image."""
	import pylab as pl

	counts, bin_edges, rects = pl.hist(
		labels, 99, normed=0, facecolor='blue', alpha=0.75)
	pl.xlabel('words')
	pl.ylabel('count')
	pl.title('Histogram of SIFT words in image')
	pl.grid(True)
	pl.show()

# this converts the list of labels into list of PCA coefficients
def convert_pca(split_labels, c=5):
	"""Convert per-image label arrays into PCA coefficients.

	Each image's labels are turned into a 99-bin word histogram (words
	1..99, matching gen_h); the stacked histogram matrix is then
	decomposed with ksvd and the scaled right singular vectors returned.

	split_labels -- list of 1-D label arrays, one per image
	c            -- number of PCA components to keep (default 5)

	Returns s*V, the component-scaled coefficient matrix.
	"""
	mat = []
	for each in split_labels:
		hist = []
		for i in xrange(1, 100):
			hist.append(np.count_nonzero(each == i))
		mat.append(hist)
	mat = np.array(mat)
	# BUG FIX: the original called ipca.ksvd(subset, c) with two
	# undefined names ('subset' and 'c'), raising NameError at runtime;
	# it now decomposes the histogram matrix built above, with the
	# component count exposed as a defaulted parameter.
	V, s, U = ipca.ksvd(mat, c)
	SV = s * V
	return SV

def gen_image(coeffs, pca_path='static/mturk/test/pca.jpg', rows=10):
	"""Render a coefficient matrix as a JPEG stripe image.

	The (n, c) coefficient matrix is replicated 'rows' times along a
	new leading axis, normalized with helper.normalize3D, and saved as
	a JPEG.

	coeffs   -- (n, c) array of PCA coefficients
	pca_path -- output file path (default preserved from the original
	            hard-coded location)
	rows     -- replication count, i.e. the visual thickness of the
	            stripe (default preserved from the original)
	"""
	from scipy.misc import toimage
	from helper import normalize3D

	n, c = coeffs.shape
	image_mat = np.zeros([rows, n, c])
	for i in xrange(rows):
		image_mat[i, :, :] = coeffs

	toimage(normalize3D(image_mat)).save(pca_path, quality=95)