# -*- coding: utf-8 -*-
"""
This file is part of tptsig
Copyright © 2011 Telecom ParisTech, TSI
Author(s): Liutkus Antoine
tptsig is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""

import numpy as np
from scipy import *
import random
from wavfile import *
from hdf5buffer import *

from misc import *
import time
import gc
				
class nmf:
	"""NMF class, to perform nonnegative tensor factorization for 3 dimensional tensors using the nonnegative parafac model.
	This version implements:
	* handling of very large matrices through HDF5.
	* any beta divergence (Euclidean distance is beta=2, Kullback Leibler is beta=1, Itakura-Saito is beta=0)
	* penalization of correlation of the time activation H, or of the spectral basis W.
	* efficient computations through batching via small patches.
	* handling several number of bytes for each parameter, either 4 or 8"""

	# Model parameters (class-level defaults, overwritten per instance):
	#   W : spectral basis, nBins x nComponents
	#   H : time activations, nComponents x nFrames
	#   Q : per-channel mixing gains, nChans x nComponents (nonnegative PARAFAC)
	W = None
	H = None
	Q = None

	# Shape of the batches, and number of splits per dimension used to fetch batch
	# data. For example, if nBatchs = 3, 3x3 = 9 patches are taken randomly from
	# the data, totalizing a size of batchShape, to perform computations.
	# This method is suboptimal, but converges very well in practice and permits
	# computations on extremely large matrices. Set batchShape to None to learn on
	# the whole signal.
	batchShape = (300, 300)
	nBatchs = 3

	class _Holder(object):
		"""Minimal container exposing a .data attribute, mirroring the hdf5Buffer
		interface for plain in-memory ndarrays."""
		def __init__(self, data=None):
			self.data = data

	def Help(self):
		"""Prints a short description of the class capabilities."""
		print(""" NMF class
			performs beta-divergence NMF for multidimensional data. Handles :
				* any beta-divergence, KL, Euclidean, Itakura Saito as special cases
				* supports correlation constrains
				* multidimensional data. Handling of third dimension is handled through 
				  a non negative PARAFAC modeling (instantaneous mixing)
				* Easy signals rendering through the use of the Signal class
		""")

	# Constructor
	def __init__(self, S=None, nComponents=50, beta=(1, 0), nbytes=4):
		"""Builds the model.

		S           : observation. Either a Signal (NMF is done on its squared
		              STFT), a ndarray, or a hdf5Buffer. Anything else yields an
		              empty placeholder.
		nComponents : number of NMF components.
		beta        : pair (initial beta, final beta) for the cooling scheme used
		              by iterate().
		nbytes      : bytes per parameter, either 4 (float32) or 8 (float64).
		"""
		# handling bitdepth of the parameters
		if nbytes == 4:
			self.defaultType = "f4"
		elif nbytes == 8:
			self.defaultType = "f8"
		else:
			print("wrong number of bytes for data. Must be either 4 or 8.")

		# Observation matrix initialization. Data is either a Signal, a ndarray or a hdf5buffer
		if isinstance(S, Signal):
			# on a signal, NMF is performed on squared STFT
			print("NMF on a Signal. Getting squared STFT.")
			if S.stft is None: S.STFT()
			self.S = S.stft.new(atom="float32")
			self.S.data[:, :, :] = abs(S.stft.data[:, :, :]) ** 2
		elif isinstance(S, ndarray):
			# on a ndarray, NMF is performed on data directly.
			# BUGFIX: the original wrote `self.S.data = S` before self.S existed
			# (AttributeError); wrap the array in a small .data holder instead.
			self.S = nmf._Holder(S)
			if self.S.data.ndim < 3:
				# pad with trailing singleton dimensions: data is always freq x time x channel
				newShape = list(self.S.data.shape)
				for r in range(3 - self.S.data.ndim): newShape.append(1)
				self.S.data.shape = newShape
		elif isinstance(S, hdf5Buffer):
			# on a hdf5buffer, NMF is performed on data directly
			self.S = S
			if self.S.data.ndim < 3:
				newShape = list(self.S.data.shape)
				for r in range(3 - self.S.data.ndim): newShape.append(1)
				self.S.data.shape = newShape
		else:
			# empty placeholder (BUGFIX: same AttributeError as the ndarray case).
			# NOTE(review): this placeholder is 1D, so a later initComponents()
			# would fail on shape[2] — kept as in the original, to be confirmed.
			self.S = nmf._Holder(array((0, 0, 0)).astype(self.defaultType))

		print("Initializing NMF with " + str(nComponents) + " components.")
		# Initialization of model's data
		self.nComponents = nComponents

		# penalization weights for the correlation of H, W and Q (0 disables them)
		self.hCorrConstraint = 0
		self.wCorrConstraint = 0
		self.qCorrConstraint = 0

		self.beta = beta
		# optional weighting of the divergence (buffer with same shape as S.data)
		self.weighting = None

		# optional fixed additional sources (4D: freq x frames x channel x source)
		self.otherSources = None
		self.otherSourcesAmp = None
		self.tuneOtherSourcesAmp = False

		self.initComponents()

	def initComponents(self, sources=None):
		"""(Re)initializes the model parameters.

		sources : list of component indices to reinitialize in W and H. If None,
		          all components are concerned. Q is always reset. If W/H are
		          absent or inconsistent with nComponents, they are fully reset.
		"""
		if sources is None:
			sources = range(0, self.nComponents)

		# initializes Q: uniform mixing, each channel getting 1/nChans
		self.Q = ones((self.S.data.shape[2], self.nComponents), dtype=self.defaultType) / self.S.data.shape[2]

		# initializes W and H.
		# BUGFIX: the original tested `not self.W` etc. (and the freshly assigned
		# Q), which raises ValueError on multi-element ndarrays (ambiguous truth
		# value); use `is None` checks instead, and drop the always-array Q test.
		if (self.W is None) or (self.H is None) or (self.nComponents != self.W.shape[1]) or (self.nComponents != self.H.shape[0]):
			print("Resetting W and H.")
			self.W = nnrandn((self.S.data.shape[0], self.nComponents), dtype=self.defaultType)
			self.H = nnrandn((self.nComponents, self.S.data.shape[1]), dtype=self.defaultType)
			return

		# partial reinitialization of the requested components only.
		# BUGFIX: `nnrand` was undefined; the generator used above is `nnrandn`.
		self.W[:, sources] = normalizeMat(nnrandn((self.S.data.shape[0], len(sources)), dtype=self.defaultType))
		self.H[sources, :] = nnrandn((len(sources), self.S.data.shape[1]), dtype=self.defaultType)

	def normalizeParameters(self):
		"""Normalizes parameters during optimization: columns of Q sum to 1 (scale
		pushed into W), then columns of W sum to 1 (scale pushed into H). The
		product W diag(Q) H is unchanged."""
		eps = 1E-15
		# each column of Q sums to one; the removed scale is transferred to W
		PanningSums = array([[max(sum(x), eps) for x in self.Q.T]])
		self.Q /= dot(ones((self.S.data.shape[2], 1)), PanningSums)
		self.W *= dot(ones((self.S.data.shape[0], 1)), PanningSums)

		# each column of W sums to one; the removed scale is transferred to H
		DSPSums = array([[max(sum(x), eps) for x in self.W.T]])
		self.W /= dot(ones((self.W.shape[0], 1)), DSPSums)
		self.H *= dot(DSPSums.T, ones((1, self.S.data.shape[1])))

	def getModel(self, rangeF=[], rangeT=[], sources=None, otherSources=None, oldModel=None, hdf5=True, eps=1E-15):
		"""Computes the model estimate sum_c Q[chan,c] W[:,c] H[c,:] per channel.

		rangeF, rangeT : frequency / time indices to model (all if empty).
		sources        : component indices to include (all if None).
		otherSources   : optional precomputed additional sources for these ranges.
		oldModel       : buffer to reuse (zeroed first); allocated when None.
		hdf5           : if True the model lives in a hdf5Buffer, else in memory.
		eps            : floor added to the model to avoid divisions by zero.
		Returns the buffer holding the estimate (with a .data ndarray).
		"""
		# BUGFIX: the original `if not sources` also triggered on an explicitly
		# empty list; only substitute the full range when sources was not given.
		if sources is None:
			sources = range(0, self.nComponents)

		if not len(rangeF): rangeF = array(range(self.S.data.shape[0]))
		if not len(rangeT): rangeT = array(range(self.S.data.shape[1]))

		if oldModel is None:
			if not hdf5:
				# BUGFIX: the original called `buffer(shape)`, which is not a valid
				# allocator; build a zeroed in-memory holder, consistent with the
				# refresh branch below.
				oldModel = nmf._Holder(zeros((len(rangeF), len(rangeT), self.S.data.shape[2])))
			else:
				oldModel = hdf5Buffer('float32', (len(rangeF), len(rangeT), self.S.data.shape[2]))
		else:
			if not hdf5:
				oldModel.data = zeros((len(rangeF), len(rangeT), self.S.data.shape[2]))
			else:
				oldModel.data[:, :, :] = oldModel.data[:, :, :] * 0
		if eps: oldModel.data[:, :, :] = oldModel.data[:, :, :] + eps

		if len(sources):
			W = self.W[ix_(rangeF, sources)]
			H = self.H[ix_(sources, rangeT)]
			for index in range(0, self.S.data.shape[2]):
				# scaling the columns of W by the channel gains is equivalent to
				# (and cheaper than) multiplying by diag(Q[index, sources])
				oldModel.data[:, :, index] += dot(W * self.Q[index, sources], H)

		# BUGFIX: `!= None` / `== None` are elementwise on ndarrays; use `is`.
		if self.otherSources is not None:
			if otherSources is None:
				otherSources = hdf5Buffer('complex', (len(rangeF), len(rangeT), self.S.data.shape[2]))
				# NOTE(review): this copies a 4D selection into a 3D buffer, and the
				# accumulation below slices otherSources without .data — this part
				# of the feature looks unfinished; kept as in the original.
				otherSources.data[:, :, :] += self.otherSources.data[ix_(rangeF, rangeT, range(self.S.data.shape[2]), range(self.otherSources.data.shape[3]))]
			oldModel.data[:, :, :] += sum(otherSources[:, :, :] * self.otherSourcesAmp, axis=3)

		return oldModel

	def iterate(self, nIter):
		"""Runs nIter multiplicative update passes on W, H and Q under the beta
		divergence, with optional batch learning, divergence weighting, correlation
		penalties, and amplitude tuning of fixed additional sources.
		"""
		def phi(beta):
			# exponent of the multiplicative updates guaranteeing monotonicity
			# (Fevotte & Idier); equals 1 on the convex range 1 <= beta <= 2.
			# BUGFIX: the original used integer division (`1/(2-beta)`), so e.g.
			# phi(0) evaluated to 0 and the updates were silently no-ops.
			if beta < 1: res = 1.0 / (2 - beta)
			elif 1 <= beta <= 2: res = 1
			elif beta > 2: res = 1.0 / (beta - 1)
			return res

		def correlationGradient(Data, target=None, sources=None, relative=False):
			# computes the decorrelation gradient for rows 'target'
			# considering rows 'sources'.
			# * relative indicates whether sources corresponds to absolute
			#   or relative row indices
			# * if no target or sources are specified, computes the complete
			#   gradient (same size as Data)
			# Returns (positive part, negative part) of the gradient.
			#-------------------------------------------------------------
			R, T = Data.shape
			if sources is None: sources = range(R)
			if target is None: target = range(R)
			nTarget = len(target)
			Gp = zeros((nTarget, T))
			Gn = zeros((nTarget, T))
			# correlation matrix of the rows, normalized to unit norms
			RHO = dot(Data, Data.T)
			normData = sqrt(diag(RHO))
			RHO = RHO * outer(1 / normData, 1 / normData)
			HRatio = outer(normData, 1 / normData)
			normData.shape = (normData.shape[0], 1)
			for index in range(nTarget):
				if not sources: currentSources = range(R)
				# NOTE(review): the `relative` branch is broken in the original
				# (`range(R) and target[index]+sources` adds an int to a list); it
				# is never exercised by this file and is kept untouched.
				elif relative: currentSources = [x for x in range(R) and target[index] + sources]
				else: currentSources = sources
				Gp[index, :] = 1 / (normData[target[index]] ** 2) * (2 * dot(HRatio[ix_([target[index]], currentSources)], Data[ix_(currentSources, range(T))]) - Data[target[index], :])
				Gn[index, :] = Data[ix_([target[index]], range(T))] / (normData[target[index]] ** 2) * (2 * sum(RHO[target[index], :]) - RHO[target[index], target[index]])
			return Gp, Gn

		eps = 1E-10

		# checking weighting matrix coefficients shape if option is enabled.
		# BUGFIX: the original printed a message then did `raise error` with an
		# undefined name; raise a proper exception instead.
		if self.weighting is not None:
			if self.S.data.shape != self.weighting.data.shape:
				raise ValueError("dimensions of weighting matrix inconsistent with signal.")

		# checking fixed sources shape if option is enabled
		# BUGFIX: `!= None` is elementwise on ndarrays; use `is not None`
		if self.otherSources is not None:
			self.otherSourcesAmp = ones((self.otherSources.shape[3]))
			if len(self.otherSources.shape) != 4:
				raise ValueError("Other sources must be a 4D ndarray : Freq*frames*channel*sources.")
			if self.otherSources.shape[0:3] != self.S.data.shape:
				raise ValueError("Other sources must have the same size as the data.")

		# checking batch blocks size. If batchShape is None, then computes on
		# the whole signal
		weightingCropped = None
		otherSourcesCropped = None
		nBins, nFrames, nChans = obsDim = self.S.data.shape

		if not self.batchShape:
			batchShape = (self.S.data.shape[0], self.S.data.shape[1])
			croppedShape = self.S.data.shape
			rangeF = xrange(batchShape[0])
			rangeT = xrange(batchShape[1])
			observationCropped = self.S.data
			if self.weighting is not None: weightingCropped = self.weighting.data
			if self.otherSources is not None: otherSourcesCropped = self.otherSources
		else:
			# crop the batch shape to the actual data size
			batchShape = [min(self.batchShape[x], obsDim[x]) for x in (0, 1)]
			croppedShape = [x for x in batchShape]
			croppedShape.append(self.S.data.shape[2])
			outOfBatch = [obsDim[x] - batchShape[x] for x in (0, 1)]
			self.nBatchs = max(1, min(self.nBatchs, min(outOfBatch)))
			observationCropped = zeros(croppedShape)
			if self.weighting is not None: weightingCropped = zeros(croppedShape)
			if self.otherSources is not None: otherSourcesCropped = zeros(croppedShape)

		# Creates temporary variables for updates
		Wnum = zeros((batchShape[0], self.W.shape[1]), dtype=self.defaultType)
		Wdenum = zeros((batchShape[0], self.W.shape[1]), dtype=self.defaultType) + eps
		Hnum = zeros((self.H.shape[0], batchShape[1]), dtype=self.defaultType)
		Hdenum = zeros((self.H.shape[0], batchShape[1]), dtype=self.defaultType) + eps
		mixtureModel = None

		# main loop (`iteration` rather than the builtin-shadowing `iter`)
		for iteration in range(0, nIter):

			# regularly force garbage collection (large temporaries per pass)
			if not (iteration % 10): gc.collect()

			# changing beta according to the cooling scheme: beta[0] during the
			# first quarter of the iterations, beta[1] afterwards
			if (iteration + 1) >= nIter / 4.0: beta = self.beta[1]
			else: beta = self.beta[0]

			# if batch learning is activated, draw randomly current learning points
			if self.batchShape:
				ratio = 1

				# redraw until less than 20% of the selected data is exactly zero
				while ratio > 0.2:
					self.batchShape = [min(self.batchShape[dim], obsDim[dim]) for dim in (0, 1)]

					# random split of the batch area into nBatchs blocks per dimension
					blocksLength = zeros((self.nBatchs + 1, 2), dtype=int)
					if self.nBatchs > 1:
						for dim in (0, 1): blocksLength[1:-1, dim] = sort(random.sample(xrange(1, batchShape[dim]), self.nBatchs - 1))
					blocksLength[-1, :] = batchShape
					blocksLength[1:, :] = diff(blocksLength, axis=0)
					innerStart = cumsum(blocksLength, axis=0)

					# random non-overlapping positions of the blocks in the data
					blocksSeparation = zeros((self.nBatchs + 2, 2), dtype=int)
					for dim in (0, 1): blocksSeparation[1:-1, dim] = sort(random.sample(xrange(0, obsDim[dim] - batchShape[dim] + 1), self.nBatchs))
					blocksSeparation[-1, :] = [nBins - batchShape[0], nFrames - batchShape[1]]
					blocksSeparation = diff(blocksSeparation, axis=0)
					cumulatedBlocksSeparation = cumsum(blocksSeparation, axis=0)

					startPoints = [[cumulatedBlocksSeparation[batch, dim] + innerStart[batch, dim] for batch in xrange(self.nBatchs)] for dim in (0, 1)]

					rangeF = []
					rangeT = []

					# gather the selected patches into the cropped buffers
					for batchF, startF in enumerate(startPoints[0]):
						rangeF = concatenate((rangeF, xrange(startF, startF + blocksLength[batchF + 1, 0]))).astype(int)

						for batchT, startT in enumerate(startPoints[1]):
							if batchF == 0:
								rangeT = concatenate((rangeT, xrange(startT, startT + blocksLength[batchT + 1, 1]))).astype(int)

							observationCropped[innerStart[batchF, 0]:innerStart[batchF + 1, 0], innerStart[batchT, 1]:innerStart[batchT + 1, 1], :] = self.S.data[startF:startF + blocksLength[batchF + 1, 0], startT:startT + blocksLength[batchT + 1, 1]]

							if self.weighting is not None:
								weightingCropped[innerStart[batchF, 0]:innerStart[batchF + 1, 0], innerStart[batchT, 1]:innerStart[batchT + 1, 1], :] = self.weighting.data[startF:startF + blocksLength[batchF + 1, 0], startT:startT + blocksLength[batchT + 1, 1]]
							if self.otherSources is not None:
								otherSourcesCropped[innerStart[batchF, 0]:innerStart[batchF + 1, 0], innerStart[batchT, 1]:innerStart[batchT + 1, 1], :] = self.otherSources.data[startF:startF + blocksLength[batchF + 1, 0], startT:startT + blocksLength[batchT + 1, 1]]
					rangeF = array(rangeF)
					rangeT = array(rangeT)

					ratio = sum((observationCropped.flatten() == 0).astype(float)) / len(observationCropped.flatten())

			# floor the observation to avoid zeros in the divergence
			observationCropped = maximum(observationCropped, 1E-15)

			# update mixture model
			mixtureModel = self.getModel(rangeF, rangeT, otherSources=otherSourcesCropped, oldModel=mixtureModel, hdf5=False)

			# display
			print("     NMF : iteration " + str(iteration + 1) + "/" + str(nIter))

			if self.nComponents:
				# Updating W: multiplicative update under the beta divergence
				Wnum = 0
				Wdenum = eps
				H = self.H[ix_(range(self.H.shape[0]), rangeT)]
				for index_canal in range(0, self.S.data.shape[2]):
					scaledH = (array([self.Q[index_canal, :]]).T * H).T
					Wnum += dot((mixtureModel.data[:, :, index_canal] ** (beta - 2)) * observationCropped[:, :, index_canal], scaledH)

					# BUGFIX: the weighted branch referenced the undefined name
					# `weightingcropped` (wrong case); the buffer is weightingCropped
					if self.weighting is None:
						Wdenum += dot(mixtureModel.data[:, :, index_canal] ** (beta - 1), scaledH)
					else:
						Wdenum += dot(weightingCropped[:, :, index_canal] * mixtureModel.data[:, :, index_canal] ** (beta - 1), scaledH)

				if self.wCorrConstraint:
					# BUGFIX: the positive gradient was assigned and read under two
					# different names in the original (NameError)
					wPositiveCorrelationGradient, wNegativeCorrelationGradient = correlationGradient(self.W[ix_(rangeF, range(self.W.shape[1]))].T)
					Wnum += wNegativeCorrelationGradient.T * self.wCorrConstraint
					Wdenum += wPositiveCorrelationGradient.T * self.wCorrConstraint

				self.W[ix_(rangeF, range(self.W.shape[1]))] *= (Wnum / Wdenum) ** phi(beta)

				# Updating H (recompute the model with the new W first)
				mixtureModel = self.getModel(rangeF, rangeT, otherSources=otherSourcesCropped, oldModel=mixtureModel, hdf5=False)

				Hnum = 0
				Hdenum = eps
				W = self.W[ix_(rangeF, range(self.W.shape[1]))]
				for index_canal in range(0, self.S.data.shape[2]):
					scaledW = (W * self.Q[index_canal, :]).T

					Hnum += dot(scaledW, (mixtureModel.data[:, :, index_canal] ** (beta - 2)) * observationCropped[:, :, index_canal])
					if self.weighting is None:
						Hdenum += dot(scaledW, mixtureModel.data[:, :, index_canal] ** (beta - 1))
					else:
						Hdenum += dot(scaledW, weightingCropped[:, :, index_canal] * mixtureModel.data[:, :, index_canal] ** (beta - 1))

				if self.hCorrConstraint:
					hPositiveCorrelationGradient, hNegativeCorrelationGradient = correlationGradient(self.H[ix_(range(self.H.shape[0]), rangeT)])
					Hnum += hNegativeCorrelationGradient * self.hCorrConstraint
					Hdenum += hPositiveCorrelationGradient * self.hCorrConstraint
				self.H[ix_(range(self.H.shape[0]), rangeT)] *= (Hnum / Hdenum) ** phi(beta)

				# Updating Q (recompute the model with the new W and H first)
				mixtureModel = self.getModel(rangeF, rangeT, otherSources=otherSourcesCropped, oldModel=mixtureModel, hdf5=False, eps=1E-15)
				WT = self.W[ix_(rangeF, range(self.W.shape[1]))].T
				HT = self.H[ix_(range(self.H.shape[0]), rangeT)].T
				if self.qCorrConstraint:
					qPositiveCorrelationGradient, qNegativeCorrelationGradient = correlationGradient(self.Q)
					Qnum = qNegativeCorrelationGradient * self.qCorrConstraint
					Qdenum = qPositiveCorrelationGradient * self.qCorrConstraint
				else:
					Qnum = zeros(self.Q.shape)
					Qdenum = zeros(self.Q.shape)

				for index_canal in range(0, self.S.data.shape[2]):
					Temp = dot(WT, mixtureModel.data[:, :, index_canal] ** (beta - 2) * observationCropped[:, :, index_canal])
					Qnum_temp = dot(Temp, HT)

					if self.weighting is None:
						Temp = dot(WT, mixtureModel.data[:, :, index_canal] ** (beta - 1))
					else:
						Temp = dot(WT, weightingCropped[:, :, index_canal] * mixtureModel.data[:, :, index_canal] ** (beta - 1))
					Qdenum_temp = eps + dot(Temp, HT)

					# only the diagonal terms pair a component of W with the same
					# component of H
					Qnum[index_canal, :] += diag(Qnum_temp)
					Qdenum[index_canal, :] += diag(Qdenum_temp)
				self.Q = self.Q * ((Qnum / Qdenum) ** phi(beta))

			# if we tune other sources amplitudes
			if self.tuneOtherSourcesAmp:
				for current_source in range(self.otherSources.shape[3]):
					mixtureModel = self.getModel(rangeF, rangeT, otherSources=otherSourcesCropped, oldModel=mixtureModel)
					# BUGFIX: `Vcropped` was undefined (NameError) — the observation
					# buffer is observationCropped; mixtureModel was also indexed
					# without .data in two of the three expressions below
					otherSourcesNum = sum((mixtureModel.data[:, :, :] ** (beta - 2) * observationCropped * otherSourcesCropped[:, :, :, current_source]).flatten())
					if self.weighting is None:
						otherSourcesDenum = sum((mixtureModel.data[:, :, :] ** (beta - 1) * otherSourcesCropped[:, :, :, current_source]).flatten())
					else:
						otherSourcesDenum = sum((weightingCropped * mixtureModel.data[:, :, :] ** (beta - 1) * otherSourcesCropped[:, :, :, current_source]).flatten())
					self.otherSourcesAmp[current_source] *= (otherSourcesNum / otherSourcesDenum) ** phi(beta)

			# rescale Q and W columns to unit sums after each full pass
			self.normalizeParameters()