# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib  
matplotlib.use('Agg') 
import matplotlib.pyplot as plt
import sys

#sklearn 0.18 version
from sklearn import metrics
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn import mixture
from sklearn.cluster import KMeans
from sklearn import svm
from sklearn.decomposition import PCA
from matplotlib.colors import LogNorm


def newfig(mode='n', dp=200):
	"""Create an empty matplotlib figure with a preset pixel size.

	mode: 'n' = 3000x2000 px (normal), 'b' = 5000x4000 px (big),
	      'h' = 20000x15000 px (huge).
	dp:   DPI, used both to convert the pixel size to inches and as the
	      figure's own dpi setting.

	Raises ValueError for an unknown mode (previously an unbound local
	``xy`` caused a confusing NameError).
	"""
	sizes = {'n': [3000, 2000], 'b': [5000, 4000], 'h': [20000, 15000]}
	try:
		xy = sizes[mode]
	except KeyError:
		raise ValueError("newfig: unknown mode %r (expected 'n', 'b' or 'h')" % (mode,))
	xinch = xy[0] / dp
	yinch = xy[1] / dp
	fig = plt.figure(figsize=(xinch, yinch), dpi=dp)
	return fig
	
def make_inits(df):
	"""Return an (n, 2) float array of [lng, lat] pairs taken from *df*."""
	coords = np.empty((len(df), 2))
	coords[:, 0] = df.lng
	coords[:, 1] = df.lat
	return coords


def savefig(fig,fname,dp=200):
	"""Write *fig* to *fname* at *dp* DPI, then close it to release memory."""
	fig.savefig(fname,dpi=dp)
	plt.close(fig)

# Fixed palette of 102 named matplotlib colors, used to tell clusters apart
# in the various scatter plots below.
colors = (
	'darkgoldenrod darkgray darkgreen darkkhaki darkmagenta darkolivegreen '
	'darkorange darkorchid darkred darksalmon darkseagreen darkslateblue '
	'darkslategray darkturquoise darkviolet deeppink deepskyblue dimgray '
	'dodgerblue firebrick floralwhite forestgreen fuchsia gainsboro gold '
	'goldenrod gray green greenyellow hotpink indianred indigo khaki '
	'lavender lawngreen lightblue lightcoral lightcyan lightgreen lightgray '
	'lightpink lightsalmon lightseagreen lightskyblue lightslategray '
	'lightsteelblue lightyellow lime limegreen magenta maroon '
	'mediumaquamarine mediumblue mediumorchid mediumpurple mediumseagreen '
	'mediumslateblue mediumspringgreen mediumturquoise mediumvioletred '
	'midnightblue mistyrose moccasin navajowhite navy olive olivedrab '
	'orange orangered orchid palegreen paleturquoise palevioletred '
	'peachpuff peru pink plum powderblue purple red rosybrown royalblue '
	'saddlebrown salmon sandybrown seagreen sienna silver skyblue slateblue '
	'slategray springgreen steelblue tan teal thistle tomato turquoise '
	'violet white yellow yellowgreen'
).split()

	
def fire_cluster(infiles,outfiles=[],params=[]):
	
	def make_ellipses(gmm, ax, n_classes):
		"""Draw one covariance ellipse per mixture component on *ax*.

		gmm       : fitted sklearn GaussianMixture
		ax        : matplotlib axes to draw on
		n_classes : number of components to draw (also indexes into colors)

		Returns an (n_classes, 2) array of ellipse widths/heights so the
		caller can inspect per-component spread.
		"""
		wh = np.zeros([n_classes, 2])
		for n, color in enumerate(colors[:n_classes]):
			# Build a 2x2 covariance over the first two dims; the layout of
			# covariances_ depends on the covariance type.
			if gmm.covariance_type == 'full':
				covariances = gmm.covariances_[n][:2, :2]
			elif gmm.covariance_type == 'tied':
				covariances = gmm.covariances_[:2, :2]
			elif gmm.covariance_type == 'diag':
				covariances = np.diag(gmm.covariances_[n][:2])
			elif gmm.covariance_type == 'spherical':
				covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
			v, w = np.linalg.eigh(covariances)
			u = w[0] / np.linalg.norm(w[0])
			angle = np.arctan2(u[1], u[0])
			angle = 180 * angle / np.pi  # convert to degrees
			v = 2. * np.sqrt(2.) * np.sqrt(v)  # eigenvalues -> axis lengths
			wh[n, 0] = v[0]
			wh[n, 1] = v[1]

			# BUG FIX: original used undefined name 'mpl'; this file imports
			# 'matplotlib' (pyplot import makes matplotlib.patches available).
			# 'angle' is passed by keyword for forward compatibility.
			ell = matplotlib.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
					angle=180 + angle, color=color)
			ell.set_clip_box(ax.bbox)
			ell.set_alpha(0.5)
			ax.add_artist(ell)
		return wh

	def gmm(ax,X,n_classes,inits,name='full'):	
		bic = []
		lowest_bic = np.infty

		# name = 'tied'
		# classifier0 = mixture.GaussianMixture(n_components=n_classes,covariance_type=name, 
					# tol=1e-5,max_iter=100, random_state=0,init_params='kmeans',n_init=1)
		# classifier0.fit(X)
		# print classifier0.means_
		# exit(0)

		# for index, (name, classifier) in enumerate(estimators.items()):
		if True:
			# # Since we have class labels for the training data, we can
			# # initialize the GMM parameters in a supervised manner.
			# # classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)  for i in xrange(n_classes)])

		# Train the other parameters using the EM algorithm.
			classifier = mixture.GaussianMixture(n_components=n_classes,means_init=inits,covariance_type=name,
						tol=1e-7,max_iter=400, random_state=0)		
					
					
			classifier.fit(X)
			
			bic.append(classifier.bic(X))
			# print name, bic[-1]
			if bic[-1] < lowest_bic:
				lowest_bic = bic[-1]
				best_gmm = classifier
			
			# print name,classifier.covariances_
			
			# h = plt.subplot(2, n_estimators // 2, index + 1)
			
			wh = make_ellipses(classifier, ax, n_classes)
				# for n, color in enumerate('rgb'):
					# print n, color
				# data = iris.data[iris.target == n]
					# plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
					# label=iris.target_names[n])
								
								
				# # # Plot the test data with crosses
				# # for n, color in enumerate('rgb'):
				# # data = X_test[y_test == n]
					# # plt.plot(data[:, 0], data[:, 1], 'x', color=color)
			
				# # y_train_pred = classifier.predict(X_train)
			# # train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
			# # plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
				 # # transform=h.transAxes)

			# # y_test_pred = classifier.predict(X_test)
			# # test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
			# # plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
					 # # transform=h.transAxes)

		# ax.set_title(name)
		# ax.set_xlim([-20,20])
		# ax.set_ylim([-30,30])
		# plt.legend(loc='lower right', prop=dict(size=12))
		# fig.savefig('gmm_%s.jpg'%label)
		# plt.close(fig)
			
		print lowest_bic
		print 'score',classifier.score(X),classifier.lower_bound_,classifier.converged_
		print 'n_iter',classifier.n_iter_
		return wh

	def whole_gmm(df,n,inits,type='full'):
		"""Fit an n-component GMM over all (lng, lat) points in *df* and
		save the plot (points + init means + component ellipses) to
		gmm_<type>.jpg.

		type : sklearn covariance_type, also used in the output filename.
		Uses the enclosing scope's xlim/ylim for the axes window.
		"""
		fig = newfig()
		ax = fig.add_subplot(111)

		X = make_inits(df)
		gmm(ax, X, n, inits, type)

		# raw points in blue, supplied init means as black crosses
		ax.plot(X[:, 0], X[:, 1], 'b.', markersize=2)
		ax.plot(inits[:, 0], inits[:, 1], 'xk', markersize=10)
		ax.set_xlim(xlim)
		ax.set_ylim(ylim)

		# savefig() already closes the figure; the redundant fig.clf() and
		# second plt.close() from the original were dropped.
		savefig(fig, 'gmm_%s.jpg' % type)

	def mean_shift(X):
		"""Cluster the (n, 2) array X with MeanShift and save a colored
		scatter plot of members plus cluster centers to mean_shift.jpg."""
		fig = newfig()
		ax = fig.add_subplot(111)

		# Bandwidth estimated from the 2% quantile of pairwise distances
		# over a 500-point sample (quantile must lie in [0, 1]; 0.5 would
		# use the median pairwise distance).
		bw = estimate_bandwidth(X, quantile=0.02, n_samples=500)

		model = MeanShift(bandwidth=bw, bin_seeding=True)
		model.fit(X)
		labels = model.labels_
		centers = model.cluster_centers_
		n_found = len(np.unique(labels))

		print("number of estimated clusters : %d" % n_found)

		for idx, col in zip(range(n_found), colors):
			mask = labels == idx
			center = centers[idx]
			ax.plot(X[mask, 0], X[mask, 1], '.', color=col)
			ax.plot(center[0], center[1], 'o', markerfacecolor=col,
					markeredgecolor='k', markersize=8)
		ax.set_title('Estimated number of clusters: %d' % n_found)

		savefig(fig, 'mean_shift.jpg')
		
	def kmeans(df,nc=100,inits='k-means++'):
		print 'process kmeans with n_cluster=',nc
		X = make_inits(df)
		km = KMeans(n_clusters=nc,random_state=170,init=inits,max_iter =500,algorithm='full').fit_predict(X)
		# y_pred = KMeans(n_clusters=30, random_state=random_state).fit_predict(X)
		print 'km clusters',len(np.unique(km)), 'unclustered',	len(km[km==-1]), min(km),max(km)
		
		fig = newfig()
		ax=fig.add_subplot(111)
		ax.scatter(X[:, 0], X[:, 1], c=km, s=2)	
		
		uni = np.unique(km)
		for k in uni:
			ax.plot(np.mean(X[km==k,0]),np.mean(X[km==k,1]),'sk',alpha=0.6)
		
		ax.set_xlim(xlim)
		ax.set_ylim(ylim)
		savefig(fig,'kmeans.jpg')
		return km
		
	def plot_kmeans(df0,km,ks,nK,hc='norm'):
		df = df0.copy(deep=True)
		X = make_inits(df)
		df['km'] = km
		m = np.zeros([len(df.km.unique()),len(df[ks].unique())])
		
		if ks in ['QHCS','ZJYY']:
			df[ks].fillna('-1',inplace=True)
			df=df[df[ks]!='-1']
		df.sort_values(ks,inplace=True)
		df.reset_index(inplace=True)
		uni = df[ks].unique()
		print 'class', ks, 'num', len(uni)
		for k, g in df.groupby('km'):
			for i,y in zip(range(len(uni)),uni):
				gg=g[g[ks]==y]
				m[k,i] = float(len(gg))/len(g) if hc=='norm' else len(gg)
		
		# pca = PCA(n_components=3)
		# pca.fit(m)	 
		# print 'PCA',pca.n_components_
		# print pca.components_
		m2 = PCA(n_components=3).fit_transform(m)
		
		# fig = newfig()
		# ax=fig.add_subplot(111)
		# ax.scatter(m2[:, 0], m2[:, 1])
		# savefig(fig,'pca_%s.jpg'%ks)
		
		
		np.savetxt('%s_m.csv'%ks,m.T,fmt='%f')
		os.system('Rscript ../kl_cluster.R %s_m.csv %s_cluster.csv'%(ks,ks))
		
		c = pd.read_csv(ks+'_cluster.csv')
		c = c[c.K==nK].drop('K',axis=1)
		c.index=c.ID-1
		
		np.random.seed(0)
		
		fig = newfig()
		ax=fig.add_subplot(111)
		ax.scatter(X[:, 0], X[:, 1], c=km,s=2)	
		for k, g in df.groupby('km'):
			clr = c.loc[k,'cluster']-1
			clr = np.random.randint(0,c.cluster.max())
			
			ids = np.argwhere(km==k)
			ax.scatter(X[ids, 0], X[ids, 1], c=colors[clr],s=2)	
			
			ax.scatter(g.lng.mean(), g.lat.mean(), s=(float(len(g))/len(g[ks].unique()))*50,c=colors[clr])
			
		ax.set_xlim(xlim)
		ax.set_ylim(ylim)		
		savefig(fig,'kmeans_%s_%s.jpg'%(hc,ks))
		
		def plot_cluster(ks,nK,sub):	
			fig = newfig()
			fig, axs = plot.subplots(nK/sub,sub, sharey=True, sharex=True,tight_layout=True)
			for cluster,g in c.groupby('cluster'):
				ai=cluster-1
				ids = g.ID.values
				# axs[ai/sub,ai%sub].set_title('num %d'%len(g),fontsize=6)
				for id in ids:
					axs[ai/sub,ai%sub].plot(m[id-1,:],color=colors[ai],linewidth=1)	
				axs[ai/sub,ai%sub].text(0,np.max(m[ids-1,:].ravel()),'num %d'%len(g),fontsize=6)
			savefig(fig,'cluster_%s_%s.jpg'%(hc,ks))
			
		plot_cluster(ks,nK,2)
		
	def dbscan(X):
		"""Run DBSCAN on the (n, 2) point array X, save a colored scatter of
		the clusters to dbscan.jpg (noise points skipped), and return the
		per-point label array (-1 = noise)."""
		fig = newfig()
		ax=fig.add_subplot(111)

		# eps is in coordinate units (degrees of lng/lat here)
		db = DBSCAN(eps=0.005, min_samples=20,algorithm='kd_tree').fit(X)
		labels = db.labels_

		# Number of clusters in labels, ignoring noise if present.
		n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)

		print('Estimated number of clusters: %d' % n_clusters_)
		# print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
		# print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
		# print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
		# print("Adjusted Rand Index: %0.3f"
			  # % metrics.adjusted_rand_score(labels_true, labels))
		# print("Adjusted Mutual Information: %0.3f"
			  # % metrics.adjusted_mutual_info_score(labels_true, labels))
		# print("Silhouette Coefficient: %0.3f"
			  # % metrics.silhouette_score(X, labels))

		# Black removed and is used for noise instead.
		unique_labels = set(labels)
		# colors = [plt.cm.Spectral(each)
				  # for each in np.linspace(0, 1, len(unique_labels))]
		# One color per label from the module-level palette; zip truncates
		# if there are more labels than palette entries.
		for k, col in zip(unique_labels, colors):
			class_member_mask = (labels == k)
			xy = X[class_member_mask]
			print k, len(xy)

			if k == -1:
				# # Black used for noise.
				# col = [0, 0, 0, 1]
				continue			

			plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
					 markersize=2) #markeredgecolor='k',

			# xy = X[class_member_mask & ~core_samples_mask]
			# plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
					 # markeredgecolor='k', markersize=6)
		ax.set_xlim(xlim)
		ax.set_ylim(ylim)
		plt.title('Estimated number of clusters: %d' % n_clusters_)
		savefig(fig,'dbscan.jpg')	
		plt.close(fig)
		return labels
	
	##### calc KDE using Gaussian #####
	def plot_kde(df,bdw=0.001,cg=0.01,lbl='',xlim=[],ylim=[],ks='',outfiles=[]):
		"""Gaussian KDE over (lng, lat) plus a coarse-grid selection step and
		an edge-weight dump between selected cells.

		df       : DataFrame with lng/lat columns and category column *ks*
		bdw      : KDE bandwidth, also the fine mesh step (degrees)
		cg       : coarse grid cell size (degrees)
		lbl      : label string, only referenced in commented-out filenames
		xlim,ylim: [min, max] window; NOTE(review): mutable default args —
		           the only caller always passes these explicitly
		ks       : category column used to pick "diverse" coarse cells
		outfiles : [kde figure, edge csv, grid figure] output paths

		Returns an (m, 2) array of local maxima of the KDE surface.
		"""
		kdefigname,edgefigname,gridfigname = outfiles
		
		from sklearn.neighbors import KernelDensity
		# keep only points inside the requested window
		df = df[(df.lng>=xlim[0])&(df.lng<=xlim[1])&(df.lat>=ylim[0])&(df.lat<=ylim[1])]
		X = make_inits(df)
		# from mpl_toolkits.mplot3d import Axes3D
		
		# make grid for KDE calc
		x,y = np.meshgrid(np.arange(xlim[0],xlim[1],bdw),np.arange(ylim[0],ylim[1],bdw))
		xy = np.vstack([x.ravel(),y.ravel()]).T
		print 'mesh grid',xy.shape, 'data shape',X.shape
		kde = KernelDensity(bandwidth=bdw, metric='euclidean',
							kernel='gaussian', algorithm='ball_tree')
		kde.fit(X)
		# z = np.exp(kde.score_samples(xy))
		# NOTE: z holds the log-density (score_samples), not the density
		z = kde.score_samples(xy)
		print 'z score shape',z.shape
		z = z.reshape(x.shape)
		
		# make coarse grid: integer cell indices per point
		df.loc[:,'cxg'] = (df.lng-xlim[0])/cg; df.cxg = df.cxg.astype(int)
		df.loc[:,'cyg'] = (df.lat-ylim[0])/cg; df.cyg = df.cyg.astype(int)
		
		# ks = 'yQ'
		# select coarse cells containing more than 75% of all distinct
		# ks values; record (cx, cy, point count, mean count, count std)
		nElem = len(df[ks].unique())
		glist = []
		gfilx = []
		gfily = []
		for xy,g in df[['cxg','cyg',ks]].groupby(['cxg','cyg']):
			if len(g[ks].unique()) > nElem * 0.75:
				uniq,cnt = np.unique(g[ks],return_counts=True)
				gfilx.append(xy[0])
				gfily.append(xy[1])
				glist.append((xy[0],xy[1],len(g),np.mean(cnt),cnt.std()))
				
		print 'selected grid',len(glist)
		# restrict df to rows falling inside the selected cells
		dffil = pd.DataFrame({'cxg':gfilx,'cyg':gfily})
		df = pd.merge(df,dffil,on=['cxg','cyg'])
		
		# pairwise edge weights between selected cells: for each ks value,
		# weight(cell_i, cell_j) = min of the two per-cell counts; across
		# ks values the maximum such weight is kept per cell pair
		dic = {}
		for key, g in df[['cxg','cyg',ks]].groupby(ks):
			gcnt = g.groupby(['cxg','cyg'],as_index=False).agg(np.size)
			for i in range(len(gcnt)):
				for j in range(i+1,len(gcnt)):
					k = '%d_%d'%(gcnt.cxg[i],gcnt.cyg[i])+','+'%d_%d'%(gcnt.cxg[j],gcnt.cyg[j])
					v = min(gcnt.loc[i,ks],gcnt.loc[j,ks])
					if dic.has_key(k):
						dic[k] = max(dic[k],v)
					else:
						dic[k] = v
						
		# dump "cellA,cellB, weight" lines (py2 print-to-file syntax)
		with open(edgefigname,'w') as fo:
			for k in dic.keys():
				print >> fo, k+',',dic[k]
		
		garray = np.array(glist)
		
		# scatter of per-cell mean vs std of ks-value counts
		fig = newfig()
		ax=fig.add_subplot(111)
		ax.plot(garray[:,3],garray[:,4],'.')
		# savefig(fig,'grid_count_%.3f_%s.jpg'%(bdw,lbl))
		savefig(fig,gridfigname)
		plt.close(fig)
		
		
		
		# filled contours of the KDE surface plus selected-cell centres
		fig = newfig()
		levels = np.linspace(0,z.max()*0.9,50)
		# ax = Axes3D(fig)
		ax=fig.add_subplot(111)
		ax.contourf(x,y,z,levels=levels,cmap=plt.get_cmap('rainbow'))
		
		ax.plot(garray[:,0]*cg+xlim[0]+cg/2.,garray[:,1]*cg+ylim[0]+cg/2.,'.',markersize = 50)
		# print garray[:,0]*cg+xlim[0],garray[:,1]*cg+ylim[0]
		
		def find_locMax(x,y,z):
			# Return (x, y) positions where z beats all 8 neighbours
			# (interior cells only).
			nx,ny=x.shape
			loc=[]
			for i in xrange(1,nx-1):
				for j in xrange(1,ny-1):
					if z[i,j]>z[i-1,j] and z[i,j]>z[i+1,j] and z[i,j]>z[i,j-1] and z[i,j]>z[i,j+1] and z[i,j]>z[i-1,j-1] and z[i,j]>z[i-1,j+1] and z[i,j]>z[i+1,j-1] and z[i,j]>z[i+1,j+1]:
						loc.append((x[i,j],y[i,j]))
			return np.array(loc)
		
		# mark local maxima of the density surface with black squares
		lmax = find_locMax(x,y,z)
		for xy in lmax:
			ax.plot(xy[0],xy[1],'sk',alpha=0.5)	
		# ax.scatter(X[:,0],X[:,1],X[:,0]*z.max(),s=2,c='g')
		
		ax.set_xlim(xlim)
		ax.set_ylim(ylim)
		# savefig(fig,'kde_%.3f_%s.jpg'%(bdw,lbl))
		savefig(fig,kdefigname)
		plt.close(fig)
		
		return lmax

	def plot_hist(a):
		"""Save 1-D histograms of a.GHMJ and a.ZJJJSS, their joint 2-D
		histogram, and a 2-D spatial (lng, lat) histogram, all under ../."""
		# BUG FIX: original called plot.subplots — 'plot' is undefined; the
		# pyplot module is imported as plt.
		fig, axs = plt.subplots(2,1, sharey=True, tight_layout=True)
		axs[0].hist(a.GHMJ,bins=range(0,100,1)); axs[0].set_title('GHMJ')
		axs[1].hist(a.ZJJJSS,bins=range(0,2500,20)); axs[1].set_title('JJSS')
		savefig(fig,'../hist_ghmj_jjss.jpg')

		# joint GHMJ x ZJJJSS histogram on a log color scale
		fig = newfig()
		ax=fig.add_subplot(111)
		ax.hist2d(a.GHMJ,a.ZJJJSS,bins=[range(0,20,1),range(0,2000,10)],norm=LogNorm())
		savefig(fig,'../hist2d_ghmj_jjss.jpg')

		# spatial density with 0.001-degree bins over the xlim/ylim window
		fig = newfig()
		ax=fig.add_subplot(111)
		counts, xedges, yedges, Image = ax.hist2d(a.lng,a.lat,bins=[np.arange(xlim[0],xlim[1],0.001),np.arange(ylim[0],ylim[1],0.001)],norm=LogNorm())
		savefig(fig,'../hist2d_xy.jpg')
	
	# SECURITY NOTE(review): eval() on externally supplied params executes
	# arbitrary code — use ast.literal_eval (or int/float parsing) if these
	# strings can ever come from an untrusted source.
	city 	= eval(params[0])
	year	= eval(params[1])
	kde_grid= eval(params[2])
	xlim	= eval(params[3])
	ylim	= eval(params[4])
	# load incidents, keep the requested city and years from *year* on
	a = pd.read_csv(infiles[0])
	a=a[a.city==city]
	a=a[a.yr>=year]

	# uaddr, cnt = np.unique(a.addr,return_counts=True)
	# sortid = np.argsort(cnt)[::-1]
	# for i in sortid[:10]:
		# print cnt[i], uaddr[i].decode('gbk').encode('utf-8')

	# drop rows with missing/invalid coordinates
	a=a[(a.lat>0)&(a.lng>0.)]

	# picdir = '%.3f'%kde_grid
	# if not os.path.exists(picdir):
		# os.mkdir(picdir)
	# print 'processing kde grid =', kde_grid

	# os.chdir(picdir)

	##### plot histogram of GHMJ, CCSS ... #####
	#plot_hist(a)
	
	loc_max = plot_kde(a,bdw=kde_grid,cg=0.01,lbl='',xlim=xlim,ylim=ylim,ks='yQ',outfiles=outfiles)
	
	# NOTE(review): exit() here means everything below is currently
	# unreachable — the pipeline stops after the KDE step.
	exit()

	# lab = dbscan(X) # not working, either most are count in one class or most are counted unlabeled
	# a['dbscan']=lab
	# a=a[a.dbscan>0]
	# X = np.zeros([len(a),2])
	# X[:,0] = a.lng
	# X[:,1] = a.lat

	# seed kmeans with the KDE local maxima (one cluster per density peak)
	nc,dmy=loc_max.shape
	km=kmeans(a,nc,loc_max)

	# unikm,cnt = np.unique(km,return_counts=True)
	# selkm = unikm[cnt>20]
	# a['km']=km
	# a=a[a.km.isin(selkm)]
	# inits=[ (np.mean(a[a.km==kx].lng),np.mean(a[a.km==kx].lat)) for kx in selkm]
	# npinits = np.array(inits) 
	# print 'gmm init', len(selkm),npinits.shape
	# whole_gmm(a,len(selkm),npinits,'spherical')
	# exit()

	# profile the kmeans clusters by each category column; the second
	# assignment narrows the run to 'yM' only
	kslst = ['hour','QHCS','ZJYY','yQ','yM','ghmj_hist','jjss_hist','month']
	kslst = ['yM']

	for ks in kslst:
		plot_kmeans(a,km,ks,10,'abs')
		plot_kmeans(a,km,ks,10,'norm')

	# mean_shift(X)