-- An implement for Factorized Composition Boltzmann Machine (FCBM)
-- currently support only binary input and binary hidden units

require 'xlua'
-- profiler shared across functions in this file (deliberately global:
-- train() calls p:start/p:lap/p:printAll)
p = xlua.Profiler()

-- project-local helpers: sigmoid, sample_bernoulli, multi_vm, multi_mv,
-- gen_alias_sampling, roulette_sampling, safe_compute_softmax,
-- indep_chain_metropolis_hastings are expected to come from these
require 'cutils'
require 'utils'

-- prototype-style "class" table and the metatable that routes instance
-- method lookups back to it
LexSemCRBM = {}
LexSemCRBM_mt = { __index = LexSemCRBM }

-- construction
function LexSemCRBM:new(temb, cemb, nctxwords, hiddim, mult )
	-- Constructor.
	-- temb: target-word embedding tensor (first dim = vocabulary size;
	--       second dim is used as the number of types)
	-- cemb: context-word embedding matrix (vocab x embdim)
	-- nctxwords: number of context-word slots per datum
	-- hiddim: number of hidden units
	-- mult: scale of the random parameter initialization (default 0.01)
	local scale	= mult or 0.01
	local embdim	= cemb:size(2)

	local m = {}
	m.embdim	= embdim
	m.hiddim	= hiddim
	m.nctxwords	= nctxwords
	m.ntypes	= temb:size(2)

	-- embedding tables are stored by reference, not copied
	m.c_emb		= cemb
	m.t_emb		= temb

	-- small random weights/biases; context weights are an order of
	-- magnitude smaller; hidden bias starts at exactly zero
	m.t_weight	= torch.randn(embdim, hiddim):mul(scale)
	m.c_weight	= torch.randn(embdim*nctxwords, hiddim):mul(scale*0.1)
	m.t_bias	= torch.randn(embdim, 1):mul(scale)
	m.c_bias	= torch.randn(embdim*nctxwords, 1):mul(scale)
	m.hidbias	= torch.randn(hiddim, 1):mul(0)

	-- running per-word type usage counts, reset each training epoch
	m.t_count	= torch.zeros(temb:size(1), temb:size(2))

	return setmetatable(m, LexSemCRBM_mt)
end

-- save to file
function LexSemCRBM:save_to_file( path )
	-- Serialize the entire model table to `path` in torch binary format.
	local file = torch.DiskFile(path, 'w')
	file:binary()
	file:writeObject(self)
	file:close()
end

-- load from a file
function LexSemCRBM:load_from_file( path )
	-- Deserialize a model written by save_to_file and restore its
	-- metatable (metatables are not preserved across serialization).
	-- Older snapshots may predate t_count; create it lazily in that case.
	local f = torch.DiskFile(path, 'r')
	f:binary()
	-- FIX: `model` was an accidental global (leaked into _G on every load)
	local model = f:readObject()
	f:close()
	model.t_count = model.t_count or
					torch.zeros(model.t_emb:size(1), model.t_emb:size(2))
	setmetatable(model, LexSemCRBM_mt)
	return model
end

function LexSemCRBM:compute_embs(ws, wembs)
	-- Look up and stack word embeddings for a batch.
	-- ws: (nslots x datasize) matrix of word ids; wembs: embedding table.
	-- Returns a (datasize x embdim*nslots) matrix, slot i occupying
	-- columns [(i-1)*embdim+1, i*embdim].
	local d = self.embdim
	local nslots, ncols = ws:size(1), ws:size(2)
	local stacked = torch.zeros(d * nslots, ncols)
	for slot = 1, nslots do
		local rows = {(slot-1)*d + 1, slot*d}
		local slab = wembs:index(1, ws[{slot,{}}]:long()):t()
		stacked[{rows,{}}]:copy(slab)
	end
	return stacked:t()
end

function LexSemCRBM:compute_tword_embs(ws, types)
	-- Embedding of each (word, type) pair: result is (datasize x embdim).
	-- Temporarily flattens self.t_emb to (nwords*ntypes, embdim) so a
	-- single :index call can fetch all pairs, then restores its shape.
	-- NOTE: mutates self.t_emb's shape in place during the call.
	-- FIX: removed the dead `torch.zeros(ws:size(1), self.embdim)`
	-- preallocation — it was immediately overwritten by :index (which
	-- returns a fresh tensor), so it only wasted an allocation.
	local nwords = self.t_emb:size(1)
	-- linear row index of pair (word w, type t): (w-1)*ntypes + t
	local id = (ws-1):mul(self.ntypes):long():add(types:long())
	self.t_emb:resize(nwords*self.ntypes, self.embdim)
	local tembs = self.t_emb:index(1, id)
	self.t_emb:resize(nwords, self.ntypes, self.embdim)
	return tembs
end

function LexSemCRBM:compute_tword_embs_tprobs(ws, tprobs)
	-- Expected target-word embedding under a type distribution:
	-- mixes each word's per-type embeddings with `tprobs`
	-- (project helper multi_vm does the batched vector-matrix products).
	local per_type_embs = self.t_emb:index(1, ws:long())
	local mixed = multi_vm(tprobs:t(), per_type_embs)
	return mixed
end


function LexSemCRBM:compute_hid_probs_states(tw, cws, types)
	-- P(h=1 | target word, context words, types) and a Bernoulli sample.
	-- tw: target-word ids (datasize); cws: context-word ids
	-- (nctxwords x datasize); types: type assignment per datum.
	-- Returns (probs, states), both (hiddim x datasize).
	-- FIX: dropped `local dim = self.dim` — the model has no `dim` field
	-- (only embdim/hiddim), so the variable was always nil and unused.
	local datasize	= tw:size(1)

	-- hidden pre-activations: type-word term + context term + bias
	local cembs	= self:compute_embs(cws, self.c_emb)
	local temb	= self:compute_tword_embs(tw, types)
	local probs	= sigmoid(
			(temb * self.t_weight)
			:add(cembs * self.c_weight):t()
			:add(torch.repeatTensor(self.hidbias, 1, datasize)))
	local states = sample_bernoulli(probs):double()
	return probs, states
end

function LexSemCRBM:compute_type_probs_states(tw, hidstates)
	-- P(type | target word, hidden state): softmax over per-type scores,
	-- plus one categorical sample per datum.
	-- Returns (probs, states) with probs of shape (ntypes x datasize).
	local datasize	= tw:size(1)
	local ntypes	= self.ntypes
	local probs		= torch.zeros(ntypes, datasize)
	-- constant matrix whose row i is filled with type index i
	local types		= torch.repeatTensor(
						torch.linspace(1,ntypes,ntypes):resize(ntypes,1),
						1, datasize)

	for i = 1, self.ntypes do
		local range	= {i,{}}
		local ts	= types[range]
		local temb	= self:compute_tword_embs(tw, ts)
		-- unnormalized score of type i for every datum:
		-- <emb(tw, i), t_weight*h + t_bias>
		probs[range]:copy(
				torch.cmul(
					temb:t(),
					(self.t_weight * hidstates)
					:add(torch.repeatTensor(self.t_bias, 1, datasize))
				):sum(1))
	end

	probs	= safe_compute_softmax(probs)
	-- FIX: `states` was an accidental global
	local states = roulette_sampling(probs)
	return probs, states
end

function LexSemCRBM:compute_tw_probs_states(tw, hidstates, types, 
										chainlen, sampler_storage )
	-- Resample target words with independence-chain Metropolis-Hastings.
	-- Proposals come from a fixed alias-table distribution
	-- (sampler_storage.w_ws); the MH ratio is exp(<emb, f>) divided by the
	-- proposal probability (sampler_storage.w_prob).
	-- Returns (nil, last chain state) — probabilities are not computed.
	-- NOTE(review): the in-place :resize calls on `sample` below are
	-- order-dependent; later index/repeatTensor steps rely on the exact
	-- (chainlen, datasize) row-major layout.

	local embdim	= self.embdim
	local hiddim	= self.hiddim
	local datasize	= tw:size(1)

	-- draw chainlen proposals per datum; row 1 is the current word,
	-- so the chain starts at the data
	--p:start('tw.sample')
	local sample = gen_alias_sampling(sampler_storage.w_ws.alias,
					sampler_storage.w_ws.prob, chainlen*datasize)
					:resize(chainlen, datasize)
	sample[{1,{}}]:copy(tw:long())

	-- proposal probability of every sampled word (MH ratio denominator)
	sample:resize(chainlen * datasize)
	local prob 	 = sampler_storage.w_prob:index(1,sample)
					:resize(chainlen,datasize)
	--p:lap('tw.sample')

	-- (word, type) embeddings of all proposals,
	-- laid out as (datasize x chainlen x embdim) after the transpose
	--p:start('tw.temb')
	local temb	 = self:compute_tword_embs(sample, 
										torch.repeatTensor(types,chainlen))
					:resize(chainlen, datasize, embdim)
					:transpose(1,2)
	--p:lap('tw.temb')
	-- top-down input to the target-word slot: t_weight*h + t_bias
	--p:start('tw.f')
	local f		 = 	(self.t_weight * hidstates)
					:add(torch.repeatTensor(self.t_bias, 1, datasize))
	--p:lap('tw.f')
	-- unnormalized acceptance ratios: exp(<emb, f>) / proposal prob
	--p:start('tw.ratio')
	local ratios = multi_mv(temb, f:t()):exp():t():cdiv(prob)
	--p:lap('tw.ratio')

	-- run the MH accept/reject sweep down each column (in place)
	--p:start('tw.mh')
	sample:resize(chainlen, datasize)
	indep_chain_metropolis_hastings(sample, ratios)
	--p:lap('tw.mh')
		
	return nil, sample[{-1,{}}]:double()
end

-- sample visible units from hidden units
-- h is a matrix, each column is a hidden state point
function LexSemCRBM:compute_word_probs_states_mcmc( w, hidemb, wemb,
										chainlen, sampler_storage )
	-- Independence-chain Metropolis-Hastings resampling of one word slot.
	-- w: current word ids (datasize); hidemb: top-down input for this slot
	-- (embdim x datasize); wemb: embedding table to look proposals up in.
	-- Returns (nil, resampled word ids) — probabilities are not computed.
	local datasize = w:size(1)
	local embdim = self.embdim

	-- chainlen alias-table proposals per datum; row 1 = current word
	local sample = gen_alias_sampling(sampler_storage.ws.alias,
					sampler_storage.ws.prob, chainlen*datasize)
					:resize(chainlen, datasize)
	sample[{1,{}}]:copy(w:long())
	sample:resize(chainlen*datasize)
	-- proposal probability of each sampled word (MH ratio denominator)
	local prob = sampler_storage.w_prob:index(1,sample)
					:resize(chainlen,datasize)
	-- proposal embeddings as (datasize x chainlen x embdim)
	local emb = wemb:index(1, sample):resize(chainlen, datasize, embdim)
					:transpose(1,2)
	-- unnormalized acceptance ratios: exp(<emb, hidemb>) / proposal prob
	local ratios = multi_mv(emb, hidemb:t()):t():exp():cdiv(prob)
	-- MH accept/reject sweep down each column (mutates `sample` in place)
	sample:resize(chainlen, datasize)
	indep_chain_metropolis_hastings(sample, ratios)
	
	return nil, sample[{-1,{}}]:double()
end

function LexSemCRBM:compute_cwords_probs_states(cws, hid_states, 
										chainlen , sampler_storage )
	-- Resample every context-word slot via MCMC, one slot at a time.
	-- cws: (nctxwords x datasize) current context words.
	-- Returns (nil, new_cws) — probabilities are not computed.
	local nctxwords = self.nctxwords
	local embdim = self.embdim
	local datasize = cws:size(2)

	-- sampling ctx words
	local new_cws = torch.Tensor(cws:size())
	-- top-down input to all context slots: (embdim*nctxwords x datasize)
	local hidembs = (self.c_weight * hid_states)
					:add(torch.repeatTensor(self.c_bias, 1, datasize))

	for i = 1,nctxwords do
		-- slice of the top-down input belonging to slot i
		local hidemb = hidembs[{{(i-1)*embdim+1,i*embdim},{}}]
		local _, cw = self:compute_word_probs_states_mcmc(
									cws[{i,{}}],hidemb,self.c_emb,chainlen, 
									{ ws ={alias=sampler_storage.cw_ws.alias,
										prob =sampler_storage.cw_ws.prob },
									w_prob = sampler_storage.cw_prob })
		new_cws[{i,{}}]:copy(cw)
	end

	-- FIX: previously returned the global `_` (the loop-local `_` was out
	-- of scope here, so whatever _G._ held was returned). Return nil
	-- explicitly to match the (probs, states) convention.
	return nil, new_cws
end

function LexSemCRBM:count_types(data)
	-- Two rounds of alternating sampling between hidden units and types,
	-- starting from uniformly random types.
	-- NOTE(review): the sampled values are discarded and self.t_count is
	-- NOT updated here despite the name — looks like an unfinished or
	-- diagnostic routine; confirm intent before relying on it.
	-- FIX: pos_h_probs / pos_h / pos_t_probs were accidental globals;
	-- also removed unused locals (embdim, hiddim) and replaced the
	-- duplicated statement pair with a loop.
	local datasize	= data:size(2)
	local ntypes 	= self.ntypes

	local pos_tw	= data[{1,{}}]
	local pos_cws	= data[{{2,-1},{}}]
	local pos_t		= torch.rand(datasize):mul(ntypes):long():add(1)

	local pos_h_probs, pos_h, pos_t_probs
	for i = 1, 2 do
		pos_h_probs, pos_h = self:compute_hid_probs_states(pos_tw,
														pos_cws, pos_t)
		pos_t_probs, pos_t = self:compute_type_probs_states(pos_tw, pos_h)
	end
end

-- compute the change in params, using 1-step Contrastive Divergence (CD1)
-- data is a matrix which each column is a datum point
function LexSemCRBM:compute_grads(data, chainlen, sampler_storage)
	-- Contrastive-divergence gradient estimate for one mini-batch.
	-- data: (1+nctxwords) x datasize matrix; row 1 holds target-word ids,
	-- the remaining rows hold context-word ids.
	-- Returns: delta_temb, delta_cemb (sparse tables keyed by word id),
	-- dense weight/bias deltas, and a per-slot reconstruction-error vector.
	-- Side effect: accumulates sampled type probabilities into self.t_count.
	local datasize	= data:size(2)
	local embdim 	= self.embdim
	local hiddim 	= self.hiddim
	local ntypes 	= self.ntypes
	-- FIX: nctxwords was read from a global that this file never defines
	-- (only the model field exists); take it from the model.
	local nctxwords	= self.nctxwords

	--================== positive phase ===============--
	local pos_tw	= data[{1,{}}]
	local pos_cws	= data[{{2,-1},{}}]
	-- start from uniformly random types, then alternate h <-> type
	local pos_t		= torch.rand(datasize):mul(ntypes):long():add(1)

	local pos_t_probs = nil
	local pos_h_probs = nil
	local neg_t_probs = nil
	local neg_h_probs = nil
	-- FIX: these were all accidental globals
	local pos_h, neg_h, neg_tw, neg_cws, neg_t, _

	--p:start('positive')
	for i = 1, 3 do
		pos_h_probs, pos_h = self:compute_hid_probs_states(pos_tw, 
														pos_cws, pos_t)
		pos_t_probs, pos_t = self:compute_type_probs_states(pos_tw, pos_h)
	end
	--p:lap('positive')

	--================== negative phase ==================--
	--p:start('negative')
	--p:start('neg.tw')
	_, neg_tw  = self:compute_tw_probs_states(pos_tw, pos_h, pos_t, 
											chainlen, sampler_storage)
	--p:lap('neg.tw')
	-- context-word resampling is deliberately disabled: negatives reuse
	-- the data's context words and the positive types as chain starts
	--[[_, neg_cws = self:compute_cwords_probs_states(pos_cws, pos_h, 
											chainlen, sampler_storage)]]
	neg_cws	= pos_cws:clone()
	neg_t	= pos_t:clone()
	for i = 1, 3 do
		neg_h_probs, neg_h = self:compute_hid_probs_states(neg_tw, 
														neg_cws, neg_t)
		neg_t_probs, neg_t = self:compute_type_probs_states(neg_tw, neg_h)
	end
	--p:lap('negative')

	--================= compute delta fweight =============
	-- use probabilities, not binary samples, in the gradient statistics
	--p:start('update')
	pos_h = pos_h_probs
	neg_h = neg_h_probs
	pos_t = pos_t_probs
	neg_t = neg_t_probs

	local pos_temb 	= self:compute_tword_embs_tprobs(pos_tw, pos_t)
	local neg_temb 	= self:compute_tword_embs_tprobs(neg_tw, neg_t)
	local pos_cembs	= self:compute_embs(pos_cws, self.c_emb)
	local neg_cembs	= self:compute_embs(neg_cws, self.c_emb)

	-- (positive - negative) statistics, averaged over the batch
	local delta_t_weight = (pos_temb:t() * pos_h:t() -
							neg_temb:t() * neg_h:t()):div(datasize)
	local delta_c_weight = (pos_cembs:t() * pos_h:t() - 
							neg_cembs:t() * neg_h:t()):div(datasize)

	local delta_t_bias	 = (pos_temb:t() - neg_temb:t()):sum(2):div(datasize)
	local delta_c_bias	 = (pos_cembs:t()-neg_cembs:t()):sum(2):div(datasize)
	local delta_hidbias	 = (pos_h - neg_h)				:sum(2):div(datasize)

	-- compute delta cemb (sparse: only words occurring in the batch)
	local delta_cemb = {}
	local pos_hidcwemb = (self.c_weight * pos_h)
						:add(torch.repeatTensor(self.c_bias, 1, datasize))
	local neg_hidcwemb = (self.c_weight * neg_h)
						:add(torch.repeatTensor(self.c_bias, 1, datasize))

	--p:start('up.cws')
	for j = 1,datasize do
		local temp0 = pos_hidcwemb[{{},{j}}]:t()
		local temp1 = neg_hidcwemb[{{},{j}}]:t()
		for i = 1,nctxwords do
			local id0 = pos_cws[{i,j}]
			local id1 = neg_cws[{i,j}]
			-- columns of the top-down input belonging to slot i
			local range = {{},{(i-1)*embdim+1, i*embdim}}
			if delta_cemb[id0] == nil then 
				delta_cemb[id0] = torch.zeros(embdim)
			end 
			if delta_cemb[id1] == nil then
				delta_cemb[id1] = torch.zeros(embdim)
			end
			delta_cemb[id0]:add(temp0[range])
			delta_cemb[id1]:add(-temp1[range])
		end
	end
	for _,delta in pairs(delta_cemb) do
		delta:div(datasize)
	end
	--p:lap('up.cws')

	-- compute delta temb (sparse, per target word: outer product of the
	-- type distribution with the top-down embedding input)
	--p:start('up.tw')
	local delta_temb = {}
	local pos_hidtemb = (self.t_weight * pos_h):
						add(torch.repeatTensor(self.t_bias, 1, datasize))
	local neg_hidtemb = (self.t_weight * neg_h):
						add(torch.repeatTensor(self.t_bias, 1, datasize))

	for j = 1,datasize do
		local temp0 = pos_hidtemb[{{},{j}}]:t()
		local temp1 = neg_hidtemb[{{},{j}}]:t()
		local id0 = pos_tw[{j}]
		local id1 = neg_tw[{j}]

		if delta_temb[id0] == nil then 
			delta_temb[id0] = torch.zeros(ntypes, embdim)
		end 
		if delta_temb[id1] == nil then 
			delta_temb[id1] = torch.zeros(ntypes, embdim)
		end

		delta_temb[id0]:add(pos_t[{{},{j}}] * temp0)
		delta_temb[id1]:add(-neg_t[{{},{j}}] * temp1)
	end

	for _,delta in pairs(delta_temb) do
		delta:div(datasize)
	end
	--p:lap('up.tw')
	-- accumulate the positive-phase type distribution into the usage counts
	for i = 1,datasize do
		self.t_count[{pos_tw[{i}],{}}]:add(pos_t[{{},i}])
	end

	--p:lap('update')

	-- reconstruction error: fraction of words changed per slot
	-- (slot 1 = target word, slots 2.. = context words)
	local err = torch.zeros(data:size(1))
	err[{1}] = torch.ne(pos_tw, neg_tw):double():sum() / datasize
	for i = 1,nctxwords do
		err[{i+1}] = torch.ne(pos_cws[{i,{}}], neg_cws[{i,{}}]):double()
					:sum() / datasize
	end

	-- periodic diagnostics; `batch` and `nbatch_to_show` are deliberate
	-- file-level globals maintained by train()
	if math.mod(batch, nbatch_to_show) == 0 then
		print(delta_t_weight:sum())
		print(delta_c_weight:sum())
		print(delta_hidbias:sum())
		print(pos_h:sum() / pos_h:numel())
		print(neg_h:sum() / neg_h:numel())
		print('pos')
		print(pos_tw[{{1,10}}]:resize(1,10))
		print(pos_t[{{},{1,10}}])
		print('neg')
		print(neg_tw[{{1,10}}]:resize(1,10))
		print(neg_t[{{},{1,10}}])
		print((pos_h - neg_h):abs():sum() / pos_h:numel())
	end

	return 
		delta_temb, delta_cemb, 
		delta_t_weight, delta_c_weight,
		delta_t_bias, delta_c_bias, delta_hidbias,
		err
end

-- cross-function training state (deliberately global: compute_grads reads
-- `batch` and `nbatch_to_show` for its periodic diagnostics)
nbatch_to_show = 50 -- print diagnostics every this many batches
nbatch_to_save = 200 -- checkpoint the model every this many batches
batch = 0 -- current batch index within the epoch, maintained by train()

-- training
-- data is a matrix, each column is a visible data point
function LexSemCRBM:train( data, nepoch, batchsize, 
						init_momentum, final_momentum, eps, 
						weightcost, chainlen, sampler_storage )
	-- Mini-batch CD training with momentum, weight decay (weights only,
	-- not biases/embeddings) and periodic checkpoints to model/<epoch>.model.
	-- data: matrix whose columns are data points (row 1 = target word,
	-- remaining rows = context words).

	local datasize = data:size(2)
	-- momentum-smoothed parameter updates
	local change_t_weight 	= torch.zeros(self.t_weight:size())
	local change_c_weight 	= torch.zeros(self.c_weight:size())
	local change_t_bias 	= torch.zeros(self.t_bias:size())
	local change_c_bias		= torch.zeros(self.c_bias:size())
	local change_hidbias 	= torch.zeros(self.hidbias:size())
	local change_temb 		= torch.zeros(self.t_emb:size())
	local change_cemb 		= torch.zeros(self.c_emb:size())

	local momentum = init_momentum

	-- NOTE(review): epochs start at 4 (resume artifact?), which makes the
	-- `epoch > 3` momentum switch below always true — confirm intent.
	for epoch = 4,nepoch do
		print('=== epoch ' ..  epoch .. '  ===')
		self.t_count:fill(0)

		--p:start('epoch')
		if epoch > 3 then
			momentum = final_momentum
		end

		batch = 0	-- deliberately global: compute_grads reads it
		-- FIX: removed the dead `torch.zeros(2)` initialization that was
		-- immediately overwritten by this per-slot error accumulator
		local total_err = torch.zeros(data:size(1))
		while true do
			-- extract a batch
			p:start('batch')
			batch = batch + 1
			local startid_batch = (batch-1) * batchsize + 1
			local endid_batch = math.min(batch*batchsize, datasize)
			local batchdata = data[{{}, {startid_batch,endid_batch}}]

			-- gradient estimates for this batch
			-- FIX: these were all accidental globals
			local delta_temb, delta_cemb,
				delta_t_weight, delta_c_weight,
				delta_t_bias, delta_c_bias, delta_hidbias, err
				= self:compute_grads(batchdata, chainlen, sampler_storage)

			-- momentum update; weight decay applies to weights only
			change_t_weight	:mul(momentum):add(delta_t_weight * eps)
							:add(-self.t_weight * eps * weightcost)
			change_c_weight	:mul(momentum):add(delta_c_weight * eps)
							:add(-self.c_weight * eps * weightcost)
			change_t_bias	:mul(momentum):add(delta_t_bias * eps)
			change_c_bias	:mul(momentum):add(delta_c_bias * eps)
			change_hidbias	:mul(momentum):add(delta_hidbias * eps)

			-- sparse embedding updates: only rows seen in this batch
			change_temb:mul(momentum)
			for i, delta in pairs(delta_temb) do
				change_temb[{i,{}}]:add(delta * eps)
			end

			change_cemb:mul(momentum)
			for i, delta in pairs(delta_cemb) do
				change_cemb[{i,{}}]:add(delta * eps)
			end

			self.t_weight	:add(change_t_weight)
			self.c_weight	:add(change_c_weight)
			self.t_bias		:add(change_t_bias)
			self.c_bias		:add(change_c_bias)
			self.hidbias	:add(change_hidbias)
			self.t_emb		:add(change_temb)
			self.c_emb		:add(change_cemb)

			total_err = total_err + err
			if math.mod(batch, nbatch_to_show) == 0 then
				print('batch ' .. batch)
				print(total_err / nbatch_to_show) 
				total_err:fill(0)
				io.flush()
				p:printAll()
			end

			if math.mod(batch, nbatch_to_save) == 0 then
				self:save_to_file('model/' .. epoch .. '.model')
			end

			-- FIX: lap the profiler BEFORE the epoch-end break so the
			-- final batch of each epoch is recorded (previously the break
			-- skipped p:lap('batch') and left the timer dangling)
			p:lap('batch')
			if endid_batch == datasize then break end
			collectgarbage()
		end
	end
end
