-- An implement for Factorized Composition Boltzmann Machine (FCBM)
-- currently support only binary input and binary hidden units

require 'xlua'
p = xlua.Profiler()

require 'cutils'
require 'utils'

-- module table and its metatable: models built by MultCtxRBM:new (and
-- reloaded by load_from_file) resolve methods through __index on MultCtxRBM
MultCtxRBM = {}
MultCtxRBM_mt = { __index = MultCtxRBM }

-- construction
function MultCtxRBM:new(emb, nctxwords, hiddim, mult )
	local mult = mult or 0.01
	local embdim = emb:size(2)

	local t_weight	= torch.randn(embdim, hiddim):mul(mult)
	local c_weight	= torch.randn(nctxwords * embdim, hiddim):mul(mult)
	local t_bias	= torch.randn(embdim, 1):mul(mult)
	local c_bias	= torch.randn(nctxwords * embdim, 1):mul(mult)
	local hidbias	= torch.randn(hiddim, 1):mul(0)

	local model = {
			embdim		= embdim,
			hiddim 		= hiddim, 
			nctxwords	= nctxwords,

			t_weight 	= t_weight, 
			c_weight 	= c_weight,
			t_bias 		= t_bias, 
			c_bias 		= c_bias,
			hidbias 	= hidbias,
			temb 		= emb:clone(),
			cemb		= emb
		}

	setmetatable(model, MultCtxRBM_mt)
	return model
end

-- save to file
function MultCtxRBM:save_to_file( path )
	local f = torch.DiskFile(path, 'w')
	f:binary()
	f:writeObject(self)
	f:close()
end

-- load from a file
function MultCtxRBM:load_from_file( path )
	local f = torch.DiskFile(path, 'r')
	f:binary()
	model = f:readObject()
	f:close()
	setmetatable(model, MultCtxRBM_mt)
	return model
end

--- Gather and stack the embeddings of several word positions.
-- ws    : (npos x datasize) matrix of word ids
-- wembs : (vocab x embdim) embedding table to look up in
-- Returns a (datasize x npos*embdim) matrix: row j concatenates the
-- embeddings of the words in column j of ws, position by position.
function MultCtxRBM:compute_embs( ws, wembs )
	local embdim = self.embdim
	local npos = ws:size(1)
	local datasize = ws:size(2)

	local stacked = torch.zeros(embdim * npos, datasize)
	for pos = 1, npos do
		-- fill the embdim-row band belonging to this position
		stacked:narrow(1, (pos-1)*embdim + 1, embdim)
			:copy(wembs:index(1, ws[{pos,{}}]:long()):t())
	end

	return stacked:t()
end

-- sample hidden units from visible units
-- v is a matrix, each column is a visible datum point
function MultCtxRBM:compute_hid_probs_states(tw, cws)
	local dim = self.dim
	local datasize = tw:size(1)

	-- compute probs
	local cembs = self:compute_embs(cws, self.cemb)
	local temb = self.temb:index(1, tw:long())
	local probs = sigmoid(
			(temb * self.t_weight)
			:cmul(cembs * self.c_weight):t()
			:add(torch.repeatTensor(self.hidbias, 1, datasize)))
	local states = sample_bernoulli(probs):double()

	return probs, states
end

-- sample visible units from hidden units
-- h is a matrix, each column is a hidden state point
function MultCtxRBM:compute_cw_probs_states(cw, tw, h, weight, bias, 
											chainlen, sampler_storage )

	local embdim = self.embdim
	local hiddim = self.hiddim
	local datasize = cw:size(1)
	
	local sample = gen_alias_sampling(sampler_storage.cw_ws.alias,
					sampler_storage.cw_ws.prob, chainlen*datasize)
					:resize(chainlen, datasize)
	sample[{1,{}}]:copy(cw:long())
	sample:resize(chainlen * datasize)
	local prob = sampler_storage.cw_prob:index(1,sample)
					:resize(chainlen,datasize)

	local cemb = self.cemb:index(1, sample)
	local temb = self.temb:index(1, tw:long())
	local f1 = (cemb * weight)	:resize(chainlen, datasize, hiddim)
								:transpose(1,2)
	local f2 = (temb * self.t_weight):cmul(h:t())
	local id = torch.linspace(1, datasize, datasize):long()
	local b = (cemb * bias):resize(chainlen, datasize):t()
	local ratios = multi_mv(f1, id, f2, id):add(b):exp():t():cdiv(prob)
	sample:resize(chainlen, datasize)
	indep_chain_metropolis_hastings(sample, ratios)

	return _, sample[{-1,{}}]:double()
end

--- Resample the target word given hidden states and context words, via the
-- same independence-chain Metropolis-Hastings scheme as
-- compute_cw_probs_states but with the target-side parameters.
-- tw              : (datasize) target-word ids (chain start state)
-- cws             : (nctxwords x datasize) context-word ids
-- h               : (hiddim x datasize) hidden states
-- chainlen        : number of proposals per data point
-- sampler_storage : precomputed alias tables + proposal probabilities
-- Returns nil, the final (datasize) word sample, and the context factor f2
-- as a third value.
function MultCtxRBM:compute_tw_probs_states(tw, cws, h, 
							chainlen, sampler_storage )

	local embdim = self.embdim
	local hiddim = self.hiddim
	local datasize = tw:size(1)

	-- draw chainlen proposals per data point; row 1 becomes the current
	-- target words so each chain starts from the observed state
	local sample = gen_alias_sampling(sampler_storage.w_ws[1].alias,
					sampler_storage.w_ws[1].prob, chainlen*datasize)
					:resize(chainlen, datasize)
	sample[{1,{}}]:copy(tw:long())
	--print(sample[{{1,10},{1,10}}])
	sample:resize(chainlen * datasize)
	-- proposal probability of each sampled word (for the MH ratio)
	local prob = sampler_storage.w_prob[1]:index(1,sample)
					:resize(chainlen,datasize)
	--print(prob[{{1,10},{1,10}}])

	-- unnormalized acceptance-ratio terms: target factor against the
	-- hidden-gated context factor, plus the target visible bias
	local temb = self.temb:index(1, sample)
	local cemb = self:compute_embs(cws, self.cemb)
	local f1 = (temb * self.t_weight):resize(chainlen, datasize, hiddim)
									:transpose(1,2)
	local f2 = (cemb * self.c_weight):cmul(h:t())
	local id = torch.linspace(1, datasize, datasize):long()
	local b = (temb * self.t_bias):resize(chainlen, datasize):t()
	-- NOTE(review): multi_mv appears to batch matrix-vector products over
	-- the datasize dimension — defined in cutils/utils, verify there
	local ratios = multi_mv(f1, id, f2, id):add(b):exp():t():cdiv(prob)
	sample:resize(chainlen, datasize)
	-- accept/reject sweep along each chain, in place
	indep_chain_metropolis_hastings(sample, ratios)
	
	--print(ratios[{{1,10},{1,10}}])

	-- '_' is an (undefined) global here, so the first return value is nil
	return _, sample[{-1,{}}]:double(), f2
end
	
--- Resample all visible words (target + contexts) given hidden states.
-- Runs two sweeps: the target word first, then each context position in
-- turn, each via Metropolis-Hastings (compute_*_probs_states).
-- tw, cws : current (datasize) target ids and (nctxwords x datasize)
--           context ids; both are cloned, inputs are not modified
-- h       : (hiddim x datasize) hidden states
-- Returns new_tw, new_cws.
function MultCtxRBM:sample_words_from_hidden(tw, cws, h, 
							chainlen , sampler_storage )
	local nctxwords = self.nctxwords
	local new_tw = tw:clone()
	local new_cws = cws:clone()

	for iter = 1,2 do
		-- resample the target word given hidden states + current contexts;
		-- 'local _' added: '_' previously leaked into the global namespace
		local _
		_, new_tw = self:compute_tw_probs_states(new_tw, new_cws, h, 
												chainlen, sampler_storage)

		-- resample each context position, conditioning on the updated
		-- target word; each position uses its own c_weight/c_bias slice
		for i = 1, nctxwords do
			local range = {{(i-1)*self.embdim+1,i*self.embdim},{}}
			local weight = self.c_weight[range]
			local bias = self.c_bias[range]
			-- 'local' added: 'new_cw' previously leaked into globals
			local _, new_cw = self:compute_cw_probs_states(
											new_cws[{i,{}}], new_tw, 
											h, weight, bias, 
											chainlen, sampler_storage )
			new_cws[{i,{}}]:copy(new_cw)
		end
	end

	return new_tw, new_cws
end

-- compute the change in params, using 1-step Contrastive Divergence (CD1)
-- data is a matrix which each column is a datum point
function MultCtxRBM:compute_grads(data, chainlen, sampler_storage)
	local datasize = data:size(2)
	local embdim = self.embdim
	local hiddim = self.hiddim 

	-- sampling v0 -> pos_h -> v1 -> neg_h
	local pos_tw = data[{1,{}}]
	local pos_cws = data[{{2,-1},{}}]
	pos_h_probs, pos_h_states = self:compute_hid_probs_states(pos_tw, pos_cws)
	neg_tw, neg_cws = self:sample_words_from_hidden(
								pos_tw, pos_cws, pos_h_states,
								chainlen, sampler_storage)
	neg_h_probs, neg_h_states = self:compute_hid_probs_states(neg_tw, neg_cws)

	-- compute delta fweight
	pos_h = pos_h_probs
	neg_h = neg_h_probs
	
	local pos_temb 	= self.temb:index(1, pos_tw:long())
	local neg_temb 	= self.temb:index(1, neg_tw:long())
	local pos_cembs	= self:compute_embs(pos_cws, self.cemb)
	local neg_cembs	= self:compute_embs(neg_cws, self.cemb)

	local pos_ftemb 	= pos_temb * self.t_weight
	local pos_fcembs 	= pos_cembs * self.c_weight
	local pos_fh 		= pos_h:t() 
	
	local neg_ftemb 	= neg_temb * self.t_weight
	local neg_fcembs 	= neg_cembs * self.c_weight
	local neg_fh 		= neg_h:t()
	
	local delta_t_weight = 
						pos_temb:t() * torch.cmul(pos_fh, pos_fcembs) -
						neg_temb:t() * torch.cmul(neg_fh, neg_fcembs)
	local delta_c_weight = 
						pos_cembs:t() * torch.cmul(pos_fh, pos_ftemb) - 
						neg_cembs:t() * torch.cmul(neg_fh, neg_ftemb)

	delta_t_weight:div(datasize)
	delta_c_weight:div(datasize)

	-- compute delta bias
	local delta_t_bias = (pos_temb - neg_temb):t()	:sum(2):div(datasize)
	local delta_c_bias = (pos_cembs - neg_cembs):t():sum(2):div(datasize)
	local delta_hidbias = (pos_h - neg_h)			:sum(2):div(datasize)

	-- compute delta emb
	local delta_temb = {}
	for i = 1, datasize do
		-- positive 
		local id1 = pos_tw[{i}]
		if delta_temb[id1] == nil then 
			delta_temb[id1] = torch.zeros(embdim) 
		end
		local range = {{i},{}}
		local temp = self.t_weight * 
					torch.cmul(pos_fcembs[range], pos_fh[range]):t()
		delta_temb[id1]:add(temp):add(self.t_bias)

		-- negative
		local id1 = neg_tw[{i}]
		if delta_temb[id1] == nil then 
			delta_temb[id1] = torch.zeros(embdim) 
		end
		local range = {{i},{}}
		local temp = self.t_weight * 
					torch.cmul(neg_fcembs[range], neg_fh[range]):t()
		delta_temb[id1]:add(-temp):add(-self.t_bias)
	end
	for _,delta in pairs(delta_temb) do
		delta:div(datasize)
	end

	-- compute delta cemb
	local delta_cemb = {}
	for j = 1, nctxwords do
		pos_cw = pos_cws[{j,{}}]
		neg_cw = neg_cws[{j,{}}]
		weight = self.c_weight[{{(j-1)*embdim+1,j*embdim},{}}]
		bias = self.c_bias[{{(j-1)*embdim+1,j*embdim},{}}]

		for i = 1, datasize do
			-- positive 
			local id1 = pos_cw[{i}]
			if delta_cemb[id1] == nil then 
				delta_cemb[id1] = torch.zeros(embdim) 
			end
			local range = {{i},{}}
			local temp = weight * 
						torch.cmul(pos_ftemb[range], pos_fh[range]):t()
			delta_cemb[id1]:add(temp):add(bias)

			-- negative
			local id1 = neg_cw[{i}]
			if delta_cemb[id1] == nil then 
				delta_cemb[id1] = torch.zeros(embdim) 
			end
			local range = {{i},{}}
			local temp = weight * 
						torch.cmul(neg_ftemb[range], neg_fh[range]):t()
			delta_cemb[id1]:add(-temp):add(-bias)
		end
	end

	for _,delta in pairs(delta_cemb) do
		delta:div(datasize)
	end

	-- compute reconstruction error
	local err = torch.zeros(nctxwords+1)
	for i = 1,nctxwords do
		err[{i}] = torch.ne(pos_cws[{i,{}}], neg_cws[{i,{}}]):double()
					:sum() / datasize
	end
	err[{-1}] = torch.ne(pos_tw, neg_tw):double():sum() / datasize

	if math.mod(batch, nbatch_to_show) == 0 then
	--	print(delta_hidbias)
		print('-------------------')
		print(delta_t_weight:sum())
		print(delta_c_weight:sum())
		print(delta_hidbias:sum())
		print(pos_h:sum() / pos_h:numel())
		print(neg_h:sum() / neg_h:numel())

		--print('--- cws ---')
		--for i = 1,20 do
		--	print(	dic.id2word[pos_cws[{2,i}]] .. ' ' ..
		--			dic.id2word[neg_cws[{2,i}]])
		--end
	end

	return delta_t_weight, delta_c_weight,  
			delta_t_bias, delta_c_bias, delta_hidbias, 
			delta_temb, delta_cemb, err
end

-- module-level training state, shared between train (which increments
-- 'batch') and compute_grads (which reads both for periodic logging)
nbatch_to_show = 100
batch = 0

-- training
-- data is a matrix, each column is a visible data point
function MultCtxRBM:train( data, nepoch, batchsize, 
						init_momentum, final_momentum, eps, 
						weightcost, chainlen, sampler_storage )

	local datasize = data:size(2)
	local change_t_weight 	= torch.zeros(self.t_weight:size())
	local change_c_weight 	= torch.zeros(self.c_weight:size())
	local change_t_bias 	= torch.zeros(self.t_bias:size())
	local change_c_bias 	= torch.zeros(self.c_bias:size())
	local change_hidbias 	= torch.zeros(self.hidbias:size())
	local change_temb 		= torch.zeros(self.temb:size())
	local change_cemb 		= torch.zeros(self.cemb:size())

	local momentum = init_momentum
	
	for epoch = 1,nepoch do
		print('=== epoch ' ..  epoch .. '  ===')

		--p:start('epoch')
		local total_err = torch.zeros(2)
		if epoch > 3 then
			momentum = final_momentum
		end	

		batch = 0
		total_err = torch.zeros(self.nctxwords+1)
		while true do
			-- extract a batch
			batch = batch + 1
			local startid_batch = (batch-1) * batchsize + 1
			local endid_batch = math.min(batch*batchsize, datasize)
			local batchdata = data[{{}, {startid_batch,endid_batch}}]		
			
			-- update params
			delta_t_weight, delta_c_weight,  
				delta_t_bias, delta_c_bias, delta_hidbias, 
				delta_temb, delta_cemb, err
				= self:compute_grads(batchdata, chainlen, sampler_storage)

			change_t_weight	:mul(momentum):add(delta_t_weight * eps)
							:add(-self.t_weight * eps * weightcost)
			change_c_weight	:mul(momentum):add(delta_c_weight * eps)
							:add(-self.c_weight * eps * weightcost)
			change_t_bias	:mul(momentum):add(delta_t_bias * eps)
			change_c_bias	:mul(momentum):add(delta_c_bias * eps)
			change_hidbias	:mul(momentum):add(delta_hidbias * eps)

			change_temb:mul(momentum)--:add(-self.emb * eps * weightcost)
			for i, delta in pairs(delta_temb) do
				change_temb[{i,{}}]:add(delta * eps)
			end
	
			change_cemb:mul(momentum)--:add(-self.emb * eps * weightcost)
			for i, delta in pairs(delta_cemb) do
				change_cemb[{i,{}}]:add(delta * eps)
			end
	
			self.t_weight	:add(change_t_weight)
			self.c_weight	:add(change_c_weight)
			self.t_bias		:add(change_t_bias)
			self.c_bias		:add(change_c_bias)
			self.hidbias	:add(change_hidbias)
			self.temb		:add(change_temb)
			self.cemb		:add(change_cemb)	

			collectgarbage()

			total_err:add(err)
			if math.mod(batch, nbatch_to_show) == 0 then
				print('batch ' .. batch)
				self:save_to_file('model/' .. epoch .. '.model')
				print(total_err / nbatch_to_show) 
				total_err:fill(0)
				io.flush()
			end
		
			if endid_batch == datasize then
				break
			end
		end
	end
end
