-- An implement for restricted Boltzmann machine 
-- currently support only binary input and binary hidden units

require 'xlua'
p = xlua.Profiler()

require 'cutils'
require 'utils'

-- Class table and shared metatable: instances built by CtxRBM:new (and
-- restored by CtxRBM:load_from_file) resolve methods on CtxRBM via __index.
CtxRBM = {}
CtxRBM_mt = { __index = CtxRBM }

-- construction
-- cwemb     : pretrained context-word embedding matrix (nwords x embdim)
-- nctxwords : number of context word positions
-- hiddim    : number of binary hidden units
-- compdim   : dimensionality of the binary "comp" visible units
-- cwlist    : tensor of word ids usable as context words
-- mult      : (optional) scale of the random weight init, default 0.01
function CtxRBM:new( cwemb, nctxwords, hiddim, compdim, cwlist, mult)
	-- fix: the original read an undeclared global 'mult'; it is now an
	-- explicit optional parameter with the same default, so existing
	-- 5-argument calls behave identically.
	local mult = mult or 0.01
	local embdim = cwemb:size(2)

	-- small random initial weights and biases
	local cwembhid = torch.randn(nctxwords*embdim, hiddim):mul(mult)
	local comphid = torch.randn(compdim, hiddim):mul(mult)
	local hidbias = torch.randn(hiddim, 1):mul(mult)
	local cwembbias = torch.randn(nctxwords*embdim, 1):mul(mult)
	local compbias = torch.randn(compdim, 1):mul(mult)

	local model = {
			embdim		= embdim,
			hiddim		= hiddim,
			cwlist		= cwlist,

			cwemb		= cwemb,
			comphid		= comphid,
			compbias	= compbias,
			cwembhid	= cwembhid,
			cwembbias	= cwembbias,
			hidbias		= hidbias,
			nctxwords	= nctxwords }

	setmetatable(model, CtxRBM_mt)
	return model
end

-- serialize this model to 'path' in torch's binary format
function CtxRBM:save_to_file( path )
	local out = torch.DiskFile(path, 'w')
	out:binary()
	out:writeObject(self)
	out:close()
end

-- load a model previously written by save_to_file; reattaches the
-- CtxRBM metatable (method table is not serialized) and returns the model
function CtxRBM:load_from_file( path )
	local f = torch.DiskFile(path, 'r')
	f:binary()
	-- fix: 'local' added -- the original leaked 'model' into the
	-- global environment
	local model = f:readObject()
	f:close()
	setmetatable(model, CtxRBM_mt)
	return model
end

-- Stack the embeddings of all context words into one matrix.
-- cws : nctxwords x datasize matrix of word ids (one column per datum)
-- returns a (nctxwords*embdim) x datasize matrix; rows
-- (i-1)*embdim+1 .. i*embdim hold the embedding of the i-th context word.
function CtxRBM:compute_ctx_embs( cws )
	local embdim = self.embdim
	-- fix: the loop below used an undeclared GLOBAL 'nctxwords';
	-- it now reads the model's own field.
	local nctxwords = self.nctxwords
	local cembs = torch.zeros(embdim * nctxwords, cws:size(2))
	for i = 1,nctxwords do
		cembs[{{(i-1)*embdim+1,i*embdim},{}}]
			:copy(self.cwemb:index(1, cws[{i,{}}]:long()):t())
	end

	return cembs
end

-- sample hidden units from visible units
-- cws         : nctxwords x datasize matrix of context word ids
--               (each column is one visible datum point)
-- comp_states : compdim x datasize binary matrix of comp-unit states
-- returns hidden probabilities and Bernoulli-sampled binary states,
-- both hiddim x datasize
function CtxRBM:compute_hid_probs_states( cws, comp_states )
	-- fix: removed unused locals 'nctxwords' and 'embdim'
	local datasize = cws:size(2)

	local cembs = self:compute_ctx_embs(cws)
	-- p(h=1 | v) = sigmoid(W_comp' * comp + W_emb' * cembs + b_h)
	local probs = sigmoid(
					(self.comphid:t() * comp_states)
					:add(self.cwembhid:t() * cembs)
					:add(torch.repeatTensor(self.hidbias, 1, datasize)) )
	local states = sample_bernoulli(probs):double()
	return probs, states
end

-- Sample visible words given hidden-side embeddings via MCMC
-- (independence-chain Metropolis-Hastings) instead of an exact softmax
-- over the whole vocabulary.
-- w        : length-datasize vector of current word ids (one per datum);
--            used as the starting state of each chain
-- hidemb   : embdim x datasize matrix; a word's unnormalized log-score
--            for datum j is emb(word) . hidemb[:,j]
-- wemb     : word-embedding matrix used to score proposals
-- chainlen : number of proposals per chain
-- sampler_storage : ws.alias / ws.prob are the alias tables of the
--            proposal distribution; w_prob holds per-word proposal
--            probabilities (indexed by word id)
-- returns nil (probabilities are never materialized) and the final
-- sample of each chain as a double vector.
function CtxRBM:compute_word_probs_states_mcmc( w, hidemb, wemb,
										chainlen, sampler_storage )
	local datasize = w:size(1)
	local embdim = self.embdim

	-- draw chainlen proposals per datum from the alias sampler,
	-- laid out as chainlen x datasize
	local sample = gen_alias_sampling(sampler_storage.ws.alias,
					sampler_storage.ws.prob, chainlen*datasize)
					:resize(chainlen, datasize)
	-- row 1: each chain starts at the datum's current word
	sample[{1,{}}]:copy(w:long())
	sample:resize(chainlen*datasize)
	-- proposal probability of every drawn word, reshaped back per chain
	local prob = sampler_storage.w_prob:index(1,sample)
					:resize(chainlen,datasize)
	local emb = wemb:index(1, sample):resize(chainlen, datasize, embdim)
					:transpose(1,2)
	local id = torch.linspace(1,datasize,datasize):long()
	-- acceptance-ratio ingredient: exp(score) / proposal probability
	-- (multi_mv presumably batches the per-datum matrix-vector products
	--  -- TODO confirm its semantics against cutils)
	local ratios = multi_mv(emb, id, hidemb:t(), id):t():exp():cdiv(prob)
	sample:resize(chainlen, datasize)
	-- run the chains in place; row chainlen ends up as the final state
	indep_chain_metropolis_hastings(sample, ratios)
	
	return nil, sample[{-1,{}}]:double()
end

-- Exact word sampling given hidden-side embeddings:
-- hidemb : embdim x datasize matrix (one column per datum)
-- wemb   : word-embedding matrix used for scoring
-- returns the full softmax probabilities (one column per datum) and a
-- roulette-wheel sample per datum.
function CtxRBM:compute_word_probs_states(hidemb, wemb)
	-- fix: removed unused local 'datasize'
	local probs = safe_compute_softmax(wemb * hidemb)
	local states = roulette_sampling(probs)
	return probs, states
end

-- Sample visible units (context words and comp units) given hidden states.
-- cws         : current nctxwords x datasize matrix of context word ids
-- comp_states : current comp states (unused by this implementation; kept
--               for interface symmetry)
-- hid_states  : hiddim x datasize binary hidden states
-- chainlen, sampler_storage : forwarded to the MCMC word sampler
-- returns nil (word probs are not materialized), the new context words,
-- the comp probabilities, and the new Bernoulli-sampled comp states.
function CtxRBM:compute_vis_probs_states( cws, comp_states, hid_states, 
										chainlen , sampler_storage )

	local nctxwords = self.nctxwords
	local embdim = self.embdim
	local datasize = cws:size(2)

	-- sampling ctx words: hidden-side embedding of every context slot
	-- fix: dropped the dead 'cwembs' computation, which was used only by
	-- the removed exact-sampling path (compute_word_probs_states)
	local new_cws = torch.Tensor(cws:size())
	local hidembs = (self.cwembhid * hid_states)
					:add(torch.repeatTensor(self.cwembbias, 1, datasize))

	for i = 1,nctxwords do
		local hidemb = hidembs[{{(i-1)*embdim+1,i*embdim},{}}]
		-- MCMC sampling over the vocabulary; avoids a full softmax
		local _, cw = self:compute_word_probs_states_mcmc(
									cws[{i,{}}],hidemb,self.cwemb,chainlen, 
									{ ws ={alias=sampler_storage.cw_ws.alias,
										prob =sampler_storage.cw_ws.prob },
									w_prob = sampler_storage.cw_prob })
		new_cws[{i,{}}]:copy(cw)
	end

	-- sampling comp units
	local comp_probs = sigmoid(
					(self.comphid * hid_states * 2)
					:add(torch.repeatTensor(self.compbias, 1, datasize)))
	local new_comp_states = sample_bernoulli(comp_probs):double()

	-- fix: return an explicit nil; the original returned the GLOBAL '_',
	-- whose value depends on unrelated code elsewhere
	return nil, new_cws, comp_probs, new_comp_states
end

-- Compute the change in params using n-step Contrastive Divergence (CD-n;
-- defaults to CD-1).
-- data : matrix in which each column is a datum point; rows 1..nctxwords
--        are context word ids, the remaining rows feed the comp units
--        (presumably probabilities, since they are Bernoulli-sampled
--        below -- TODO confirm against the caller)
-- chainlen, sampler_storage : forwarded to the MCMC word sampler
-- cd_niters : number of CD Gibbs steps (default 1)
-- returns delta_comphid, delta_cwembhid, delta_compbias, delta_cwembbias,
-- delta_hidbias, delta_cwemb (a SPARSE table: word id -> delta vector),
-- and a reconstruction-error vector of length nctxwords+1.
function CtxRBM:compute_grads( data, chainlen, sampler_storage, cd_niters)
	local cd_niters = cd_niters or 1
	local datasize = data:size(2)
	local nctxwords = self.nctxwords
	local embdim = self.embdim
	local hiddim = self.hiddim 

	--=============== cd-n ==============--
	-- positive phase: clamp the data, sample hidden units once
	local pos_cws = data[{{1,nctxwords},{}}]
	local pos_comp_probs = data[{{nctxwords+1,-1}}]
	local pos_comp_states = sample_bernoulli(pos_comp_probs):double()
	local pos_hid_probs, pos_hid_states 
					= self:compute_hid_probs_states(pos_cws, pos_comp_states)

	-- negative phase: cd_niters alternating Gibbs steps starting from
	-- the positive-phase states
	local neg_hid_probs = pos_hid_probs
	local neg_hid_states = pos_hid_states
	local neg_cws = pos_cws
	local neg_comp_states = pos_comp_states
	local neg_comp_probs
	
	for iter = 1, cd_niters do
		-- NOTE(review): '_' here is the GLOBAL underscore, not a local
		_, neg_cws, neg_comp_probs, neg_comp_states = 
					self:compute_vis_probs_states(neg_cws, 
									neg_comp_states, neg_hid_states, 
									chainlen, sampler_storage)
		neg_hid_probs, neg_hid_states =
					self:compute_hid_probs_states(neg_cws, neg_comp_states)
	end

	--============== compute grads ==============--
	-- gradients use probabilities (not sampled states) for hidden/comp
	local pos_h = pos_hid_probs
	local neg_h = neg_hid_probs
	local pos_comp = pos_comp_probs
	local neg_comp = neg_comp_probs

	local pos_cembs = self:compute_ctx_embs(pos_cws)
	local neg_cembs = self:compute_ctx_embs(neg_cws)

	-- <positive correlations> - <negative correlations>, batch-averaged
	local delta_cwembhid = (pos_cembs*pos_h:t() - neg_cembs*neg_h:t())
							:div(datasize)

	-- compute delta cwemb: sparse gradient keyed by word id; positive
	-- occurrences add the hidden-side embedding, negative ones subtract
	local delta_cwemb = {}
	local pos_hidcwemb = (self.cwembhid * pos_h)
						:add(torch.repeatTensor(self.cwembbias, 1, datasize))
	local neg_hidcwemb = (self.cwembhid * neg_h)
						:add(torch.repeatTensor(self.cwembbias, 1, datasize))
	for j = 1,datasize do
		local temp0 = pos_hidcwemb[{{},{j}}]:t()
		local temp1 = neg_hidcwemb[{{},{j}}]:t()
		for i = 1,nctxwords do
			local id0 = pos_cws[{i,j}]
			local id1 = neg_cws[{i,j}]
			-- slot i occupies columns (i-1)*embdim+1 .. i*embdim
			local range = {{},{(i-1)*embdim+1, i*embdim}}
			if delta_cwemb[id0] == nil then 
				delta_cwemb[id0] = torch.zeros(embdim)
			end 
			delta_cwemb[id0]:add(temp0[range])

			if delta_cwemb[id1] == nil then 
				delta_cwemb[id1] = torch.zeros(embdim)
			end
			delta_cwemb[id1]:add(-temp1[range])
		end
	end
	-- batch-average the sparse embedding deltas
	for _,delta in pairs(delta_cwemb) do
		delta:mul( 1. / datasize )
	end

	-- compute delta comphid
	local delta_comphid = (pos_comp * pos_h:t() - neg_comp *  neg_h:t() )
							:div(datasize)

	-- compute delta biases
	local delta_cwembbias = (pos_cembs - neg_cembs):sum(2):div(datasize)
	local delta_hidbias = (pos_h - neg_h):sum(2):div(datasize)
	local delta_compbias = (pos_comp - neg_comp):sum(2):div(datasize)

	-- reconstruction error: per context slot, the fraction of words that
	-- changed; the last entry is the mean absolute comp difference
	local err = torch.zeros(nctxwords+1)
	for i = 1,nctxwords do
		err[{i}] = 	torch.ne(pos_cws[{i,{}}], neg_cws[{i,{}}]):double()
					:sum()/datasize
	end
	err[{-1}] = (pos_comp - neg_comp):abs():sum() / datasize

	-- periodic diagnostics; 'batch' and 'nbatch_to_show' are globals
	-- maintained by train()
	if math.mod(batch, nbatch_to_show) == 0 then
		--print('--------------')
		--print(pos_comp[{{},{1,2}}]:t() - neg_comp[{{},{1,2}}]:t())
		print(pos_h:sum() / pos_h:numel())
		print(neg_h:sum() / neg_h:numel())
	end

	return delta_comphid, delta_cwembhid, delta_compbias, 
			delta_cwembbias, delta_hidbias, delta_cwemb, err
end

-- Module-level batch counter (shared with compute_grads' diagnostics)
-- and the reporting/checkpoint interval, in batches.
batch = 0
nbatch_to_show = 100

-- training (SGD with momentum and L2 weight decay)
-- data           : matrix, each column is a visible data point
-- nepoch         : number of passes over the data
-- batchsize      : minibatch size
-- init_momentum  : momentum used for the first 3 epochs
-- final_momentum : momentum from epoch 4 onward
-- eps            : learning rate
-- weightcost     : weight-decay coefficient (weight matrices only)
-- chainlen, sampler_storage : forwarded to compute_grads' MCMC sampler
function CtxRBM:train( data, nepoch, batchsize, init_momentum, final_momentum,
								eps, weightcost, chainlen, sampler_storage )
	local datasize = data:size(2)
	-- momentum-smoothed increments, one per parameter tensor
	local inc_comphid	= torch.zeros( self.comphid:size() )
	local inc_compbias	= torch.zeros( self.compbias:size() )
	local inc_cwembhid	= torch.zeros( self.cwembhid:size() )
	local inc_cwembbias	= torch.zeros( self.cwembbias:size() )
	local inc_hidbias	= torch.zeros( self.hidbias:size() )
	local inc_cwemb		= torch.zeros( self.cwemb:size() )

	local momentum = init_momentum
	
	for epoch = 1,nepoch do
		print('=== epoch ' ..  epoch .. '  ===')

		-- fix: removed a dead 'local total_err = 0' that was immediately
		-- shadowed by the tensor accumulator below
		if epoch > 3 then
			momentum = final_momentum
		end	

		-- 'batch' is intentionally the module-level counter that
		-- compute_grads reads for its diagnostics
		batch = 0
		local total_err = torch.zeros(self.nctxwords + 1)
		while true do
			-- extract a batch
			batch = batch + 1
			local startid_batch = (batch-1) * batchsize + 1
			local endid_batch = math.min(batch*batchsize, datasize)
			local batchdata = data[{{}, {startid_batch,endid_batch}}]
			
			-- compute gradients
			-- fix: 'local' added -- these previously leaked into the
			-- global scope
			local delta_comphid, delta_cwembhid, delta_compbias, 
			delta_cwembbias, delta_hidbias, delta_cwemb, err  = 
						self:compute_grads(batchdata,chainlen,sampler_storage)

			-- momentum update; weight decay on the weight matrices only
			inc_comphid		:mul(momentum):add(delta_comphid * eps)
								:add(self.comphid * (-eps*weightcost))
			inc_compbias	:mul(momentum):add(delta_compbias * eps)
			inc_cwembhid	:mul(momentum):add(delta_cwembhid * eps)
								:add(self.cwembhid * (-eps*weightcost))
			inc_cwembbias	:mul(momentum):add(delta_cwembbias * eps)
			inc_hidbias		:mul(momentum):add(delta_hidbias * eps)

			-- delta_cwemb is sparse: word id -> delta vector
			inc_cwemb:mul(momentum)
			for i,delta in pairs(delta_cwemb) do
				inc_cwemb[{{i},{}}]:add(delta*eps)
			end
	
			self.comphid	:add(inc_comphid)
			self.compbias	:add(inc_compbias)
			self.cwembbias	:add(inc_cwembbias)
			self.cwembhid	:add(inc_cwembhid)
			self.hidbias	:add(inc_hidbias)
			self.cwemb		:add(inc_cwemb)

			collectgarbage()
			total_err:add(err)

			-- periodic report + checkpoint
			if math.mod(batch, nbatch_to_show) == 0 then
				print('batch ' .. batch)
				self:save_to_file('ctx_model/' .. epoch .. '.model')
				print(total_err:div(nbatch_to_show))
				total_err:fill(0)

				print('--------------')
				print(self.cwembbias:sum())
				print(self.comphid:sum())
				print(self.compbias:sum())
				print(self.hidbias:sum())

				io.flush()
			end
		
			if endid_batch == datasize then
				break
			end
		end
	end
end

