/*============================================================================
  File:      lrpmodel.cpp
 
  Summary:   Implementation of the pair-wise linear regression model.
 
  Date:		 June 30, 2004
------------------------------------------------------------------------------
  This file is part of the Microsoft SQL Server Code Samples.
 
  Copyright (C) 2003 Microsoft Corporation.  All rights reserved.
 
This source code is intended only as a supplement to Microsoft
Development Tools and/or on-line documentation.  See these other
materials for detailed information regarding Microsoft code samples.
 
THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
PARTICULAR PURPOSE.
============================================================================*/

#include "stdafx.h"
#include "lrpmodel.h"
#include <math.h>
#include "DataValues.h"
#include "lrsstatreader.h"

// Log Gamma function: returns log(Gamma(dblX)) from a Stirling-type series,
// after shifting small arguments above 7 with the recurrence
// Gamma(x + 1) = x * Gamma(x).

static DBL		DblLogGamma(DBL dblX)
{
	const DBL dblC1 = 0.918938533204673;
	const DBL dblC2 = 0.000595238095238;
	const DBL dblC3 = 0.000793650793651;
	const DBL dblC4 = 0.002777777777778;
	const DBL dblC5 = 0.083333333333333;

	// Log-space correction accumulated while shifting the argument upward

	DBL dblShiftCorrection = 0.0;

	if (dblX < 7.0)
	{
		// Gamma(x) = Gamma(x + k) / [x (x+1) ... (x+k-1)]: build the product
		// of the shifted-over factors, then subtract its log at the end.

		DBL dblProduct = 1.0;

		while (dblX < 7.0)
		{
			dblProduct *= dblX;
			dblX += 1.0;
		}

		dblShiftCorrection = -log(dblProduct);
	}

	DBL dblInvXSq = 1.0 / (dblX * dblX);

	// Stirling's expansion: log Gamma(x) ~ (x - 1/2) log x - x + log(2 pi)/2
	// plus a short series in 1/x^2 (dblC1 = log(2 pi)/2).

	return dblShiftCorrection + log(dblX) * (dblX - 0.5) - dblX + dblC1 +
			(((-dblC2 * dblInvXSq + dblC3) * dblInvXSq - dblC4) * dblInvXSq + dblC5) / dblX;
}

// Bayesian selection score for a pair of attributes (X, Y):
// log p(X,Y) - log p(X) - log p(Y), computed from the sufficient
// statistics (sums, sums of squares, sum of products) and the case count.

static DBL	DblSelectionScore(DBL dblSumXX, DBL dblSumYY, DBL dblSumXY, 
					  DBL dblSumX, DBL dblSumY,DBL dblN)
{
	// Turn the raw sums into sample means and (biased) sample covariances

	DBL	dblAvgX = dblSumX / dblN;
	DBL	dblAvgY = dblSumY / dblN;

	DBL dblVarX = dblSumXX / dblN - dblAvgX * dblAvgX;
	DBL dblVarY = dblSumYY / dblN - dblAvgY * dblAvgY;
	DBL dblCov  = dblSumXY / dblN - dblAvgX * dblAvgY;

	// Posterior covariance matrix, assuming the data are both shifted and
	// scaled. With an ess of 1 for the mean (hence ess 3 for the covariance),
	// both diagonal entries become N + 1 and the off-diagonal entry is
	// N times the sample correlation.

	DBL dblDiag		= dblN + 1.0;
	DBL dblOffDiag	= dblN * dblCov / (sqrt(dblVarX) * sqrt(dblVarY));

	// Numerator: determinant of the posterior covariance matrix, raised
	// (in log space) to the power -(3 + N) / 2

	DBL dblScore = -((3.0 + dblN) / 2) * log(dblDiag * dblDiag - dblOffDiag * dblOffDiag);

	// Denominator: the two marginal terms share the same diagonal entry,
	// so subtracting both adds (2 + N) * log(dblDiag)

	dblScore += (2.0 + dblN) * log(dblDiag);

	// Ratio of normalizing constants ("part B")

	const DBL dblLogGamma3over2 = ::DblLogGamma(3.0 / 2.0);

	dblScore +=	::DblLogGamma((3.0 + dblN) / 2.0) -
				::DblLogGamma((2.0 + dblN) / 2.0) -
				dblLogGamma3over2;

	return dblScore;
}

//-----------------------------------------------------------------------------
// PopulateModel
//
// Builds the pair-wise regression model from the sufficient statistics
// (per-attribute sums, sums of squares, and pair-wise sums of products)
// gathered by lrsstatreader. A pair is kept only when its Bayesian
// dependency score reaches dblMinDepScore. For each retained pair two
// parameter sets are stored: one for viewing (input -> output regression)
// and one for naive-Bayes inference (output -> input regression). The
// per-attribute sample means/SDs and the cached posterior SD of every
// output attribute are filled in as well.
//
// Returns S_OK on success; CHECK_STL_MEM handles allocation failures.
//-----------------------------------------------------------------------------

HRESULT	LRPMODEL::PopulateModel(LRSSTATREADER& lrsstatreader, DBL	dblMinDepScore)
{
	_cAttribute	= lrsstatreader._cAttribute;

	// We populate two sets of parameters here: one for viewing, in which 
	// every output attribute is the target of a regression on every input
	// attribute, and another for naive-Bayes inference, in which every output
	// variable is the regressor in a regression for every input attribute.
	
 	CHECK_STL_MEM(_vvlrparamView.resize(_cAttribute));
	CHECK_STL_MEM(_vvlrparamPredict.resize(_cAttribute));
 
	_cCase = lrsstatreader._cCase;

	_viAttributeOutput = lrsstatreader._viAttributeOutput; 

	// Following is the OLS method to get the parameters. Let m and b be the slope and
	// offset, respectively. Let R, T, RR, RT, TT be the relevant sums, where R and T
	// denote regressor and target, respectively.
	//
	//	  B	  =		X^{-1}   *    Y
	//
	//
	//  | b	|	| N  R  |-1   | T  |
	//	|	| = |       |	* |	   |
	//	| m	|	| R  RR |	  | TR |

	//
	//		  |	      RR		      -R		 |	
	//		  |	 -------------	  -------------	 |	
	//		  |  N*RR - R*R		  N*RR - R*R	 | 
	//		  |									 | 		| T  | 
	//		= |	      							 |	* 	|	 | 
	//		  |	     -R       	       N       	 |		| TR | 
	//		  |	 ------------- 	  -------------  |	
	//		  |	 N*RR - R*R 	  N*RR - R*R     |	

	//
	//  
	//		    RR * T - R * TR		
	//	b =	    -------------------	
	//			   N*RR - R*R	
	//
	//
	//  	      N * TR - R * T    
	//	m =     -------------------
	//		       N*RR - R*R	
	//
	// The variance is the usual unbiased OLS residual variance: two
	// parameters (slope and offset) are fitted, so two degrees of freedom
	// are lost -- N - #regressors - 1 = N - 1 - 1 = N - 2.
	//
	//	sigma^2 = (TT - [b m] * X * [b m]') / (N - #regressors - 1)
	//
	//			= (TT - b^2 N - 2 (b)(m)(R) - m^2 (RR) ) / (N - 2)		

	
	for (UINT iiAttributeOut = 0; iiAttributeOut < lrsstatreader._ciAttributeOutput; iiAttributeOut++)
	{
		UINT iAttributeOut = lrsstatreader._viAttributeOutput[iiAttributeOut];
	
		CHECK_STL_MEM(_vvlrparamView[iAttributeOut].reserve(lrsstatreader._ciAttributeInput));
		CHECK_STL_MEM(_vvlrparamPredict[iAttributeOut].reserve(lrsstatreader._ciAttributeInput));

		// Sufficient statistics for the output attribute

 		DBL dblO	= lrsstatreader._vdblSum[iAttributeOut];
		DBL dblOO	= lrsstatreader._vdblSumSquares[iAttributeOut];
		
		for (UINT iiAttributeIn = 0; iiAttributeIn < lrsstatreader._ciAttributeInput; iiAttributeIn++)
		{
			UINT iAttributeIn = lrsstatreader._viAttributeInput[iiAttributeIn];

			// Don't regress on self

			if (iAttributeIn == iAttributeOut)
			{
				continue; 
			}
		
			// Sufficient statistics for the input attribute

			DBL dblI	= lrsstatreader._vdblSum[iAttributeIn];
			DBL dblII	= lrsstatreader._vdblSumSquares[iAttributeIn];
			// Note: the sum-of-products is indexed by iiAttribute, not iAttribute!
			DBL dblIO	= lrsstatreader._vvdblSumProducts[iiAttributeOut][iiAttributeIn];
		
			// Bayesian dependency score for this pair; see DblSelectionScore()

			DBL dblSelectionScore = 
				DblSelectionScore(dblII,dblOO,dblIO, dblI, dblO, _cCase);

			if (dblSelectionScore < dblMinDepScore)
			{
				// The pair-wise dependency is not big enough for us to include
				// in the model.

				continue;
			}

			// Get the parameters for both the Input -> Output regression (viewing)
			// and the Output -> Input regression (inference)
            
			DBL dblDenomView	= _cCase * dblII - dblI*dblI;
			DBL dblDenomPred		= _cCase * dblOO - dblO*dblO;

			// Skip pairs where either OLS denominator (N*RR - R*R) is
			// (near) zero: the divisions below would be meaningless

			if (dblDenomView < 0.000000000001 || dblDenomPred < 0.000000000001)
			{
				continue;
			}

			// Offsets (b) and slopes (m) per the closed form derived above

			DBL	dblBView	= (dblII * dblO - dblI * dblIO) / dblDenomView;
			DBL	dblBPred		= (dblOO * dblI - dblO * dblIO) / dblDenomPred;

			DBL dblMView	= (_cCase * dblIO - dblI * dblO) / dblDenomView;
			DBL dblMPred		= (_cCase * dblIO - dblO * dblI) / dblDenomPred;

			// Residual variances (unbiased, N - 2 degrees of freedom)

			DBL dblVarView	= (dblOO - dblBView*dblBView*_cCase - 2*dblBView*dblMView*dblI - dblMView*dblMView*dblII)
							/ (_cCase - 2);

			DBL dblVarPred	= (dblII - dblBPred*dblBPred*_cCase - 2*dblBPred*dblMPred*dblO - dblMPred*dblMPred*dblOO)
							/ (_cCase - 2);
			
			
			// Viewing parameters: iAttributeIn is the regressor,
			// iAttributeOut the target

			LRPARAM lrparamView;

			lrparamView._dblScore		= dblSelectionScore;
			lrparamView._dblSlope		= dblMView;	
			lrparamView._dblIntercept	= dblBView;
			lrparamView._dblSD			= sqrt(dblVarView);
			
			lrparamView._iAttributeRegressor	= iAttributeIn;
			lrparamView._iAttributeTarget		= iAttributeOut;

			CHECK_STL_MEM(_vvlrparamView[iAttributeOut].push_back(lrparamView));

			// Prediction parameters: same score, with the roles swapped

			LRPARAM lrparamPredict;

			lrparamPredict._dblScore		= dblSelectionScore;
			lrparamPredict._dblSlope		= dblMPred;	
			lrparamPredict._dblIntercept	= dblBPred;
			lrparamPredict._dblSD			= sqrt(dblVarPred);
			
			lrparamPredict._iAttributeRegressor	= iAttributeOut;
			lrparamPredict._iAttributeTarget	= iAttributeIn;

			CHECK_STL_MEM(_vvlrparamPredict[iAttributeOut].push_back(lrparamPredict));

		}
	}

		
	// Fill in the sample means and variances 

	CHECK_STL_MEM(_vdblSampleSD.resize(_cAttribute));
	CHECK_STL_MEM(_vdblSampleMean.resize(_cAttribute));

	DBL dblDenom = _cCase * (_cCase - 1);

	for (ULONG iAttribute = 0; iAttribute < _cAttribute; iAttribute++)
	{
		// Use the "unbiased" sample variance:
		//
		// Var = [n \sum X^2 - (\sum X)^2] / (n * (n-1))

		DBL dblSum		= lrsstatreader._vdblSum[iAttribute];
		DBL dblSumSq	= lrsstatreader._vdblSumSquares[iAttribute];

		_vdblSampleSD[iAttribute]
			= sqrt((_cCase * dblSumSq - dblSum * dblSum)	/ dblDenom);

		_vdblSampleMean[iAttribute] = dblSum / _cCase;
	}

	// Compute the posterior SD for each variable. We will cache this away for 
	// more efficient inference.
	//
	// PostSD = 1 / [ 1/PriorSD(Y) + \sum_{X_i} m_i^2/sd_i ]
	//
	// Y above refers to the target variable for the posterior, and X_i refers to 
	// an "input" variable. What makes things a little confusing is that the m_i 
	// (i.e. slope) values are obtained when Y is the INPUT variable for the pair-wise
	// regression between Y and X_i.
	//
	// NOTE(review): the loop divides the squared slope by _dblSD (a standard
	// deviation), whereas a textbook precision update would divide by the
	// variance. This matches the formula above and ExtractPosterior(), but
	// confirm it is intentional.
	
	CHECK_STL_MEM(_vdblPostSD.resize(_cAttribute));

	for (UINT iiAttributeOut = 0; iiAttributeOut < lrsstatreader._ciAttributeOutput; iiAttributeOut++)
	{
		UINT iAttributeOut = lrsstatreader._viAttributeOutput[iiAttributeOut];
	
		// Accumulate the inverse of PostSD, starting with the prior term
	
		DBL dblPostSDInv	= 1.0 / _vdblSampleSD[iAttributeOut];

		UINT clrparam = (UINT) _vvlrparamPredict[iAttributeOut].size();

		for (UINT ilrparam = 0; ilrparam < clrparam; ilrparam++)
		{
			LRPARAM& lrparam = _vvlrparamPredict[iAttributeOut][ilrparam];

			// Only regressions with iAttributeOut as regressor live here

			assert(lrparam._iAttributeRegressor == iAttributeOut);

			dblPostSDInv += 
				(lrparam._dblSlope * lrparam._dblSlope) / lrparam._dblSD;
		}

        _vdblPostSD[iAttributeOut] = 1.0 / dblPostSDInv;
	}
	
	// Observed value ranges, carried over for the model's consumers

	_vdblMin = lrsstatreader._vdblMin;
	_vdblMax = lrsstatreader._vdblMax;

	return S_OK;
}

 
//-----------------------------------------------------------------------------
// ExtractPosterior
//
// Naive-Bayes inference for attribute iAttribute given the dense case
// vdblValueDense: every stored pair-wise regression in which iAttribute is
// the regressor (X_i = m_i * Y + b_i) contributes, under the assumption
// that the X_i are mutually independent given Y:
//
//	PostMean = PostSD * [ PriorMean(Y)/PriorSD(Y) + \sum_i m_i*(x_i - b_i)/sd_i ]
//	PostSD   = 1 / [ 1/PriorSD(Y) + \sum_i m_i^2/sd_i ]
//
// PostSD is pre-computed and cached by PopulateModel(). Returns E_FAIL if
// iAttribute itself or any participating attribute has a missing value;
// the model does not handle missing data.
//-----------------------------------------------------------------------------

HRESULT		LRPMODEL::ExtractPosterior(const VDBL&	vdblValueDense,		// [iAttribute]
								       ULONG		iAttribute,
									   DBL&			dblPostMean,
									   DBL&			dblPostSD)
{
	// The model cannot work with a missing target value

	if (vdblValueDense[iAttribute] == dblMissing)
		return E_FAIL;

	// Seed the accumulation with the prior term PriorMean(Y) / PriorSD(Y)

	dblPostMean = _vdblSampleMean[iAttribute] / _vdblSampleSD[iAttribute];

	UINT clrparamPred = (UINT) _vvlrparamPredict[iAttribute].size();

	for (UINT ilrparamPred = 0; ilrparamPred < clrparamPred; ilrparamPred++)
	{
		LRPARAM& lrparamCur = _vvlrparamPredict[iAttribute][ilrparamPred];

		// Every regression stored for this slot has iAttribute as regressor

		assert(lrparamCur._iAttributeRegressor == iAttribute);

		DBL dblValueObserved = vdblValueDense[lrparamCur._iAttributeTarget];

		// A missing observation anywhere in the sum also aborts inference

		if (dblValueObserved == dblMissing)
			return E_FAIL;

		dblPostMean += 
			lrparamCur._dblSlope * 
			(dblValueObserved - lrparamCur._dblIntercept) / lrparamCur._dblSD;
	}

	// Scale the accumulated sum by the cached posterior SD

	dblPostMean *= _vdblPostSD[iAttribute];
	dblPostSD = _vdblPostSD[iAttribute];

	return S_OK;
}
								


