-- An implementation of a Boltzmann machine model (AddVeCoSemDBM);
-- currently supports only binary input and binary hidden units.

require 'xlua'
p = xlua.Profiler()

require 'cutils'
require 'utils'
require 'add_comp_rbm'
require 'ctx_rbm'

-- "Class" table and shared metatable: instances built by :new() and
-- :load_from_file() resolve their methods through __index lookup here.
AddVeCoSemDBM = {}
AddVeCoSemDBM_mt = { __index = AddVeCoSemDBM }

-- construction
-- Assemble a joint model from a pre-trained context RBM and a pre-trained
-- additive-composition RBM. The composition-layer bias is shared between
-- the two sub-models, so it is averaged. (`wemb` is accepted for interface
-- compatibility but not read -- the embeddings come from addcomp_rbm.)
function AddVeCoSemDBM:new( ctx_rbm, addcomp_rbm , wemb )
	-- shared composition bias: mean of the two pre-trained biases
	local avg_compbias = (ctx_rbm.compbias + addcomp_rbm.compbias):mul(0.5)

	local dbm = {}

	-- dimensions
	dbm.embdim    = ctx_rbm.embdim
	dbm.hiddim    = ctx_rbm.hiddim
	dbm.compdim   = addcomp_rbm.compdim
	dbm.nctxwords = ctx_rbm.nctxwords
	dbm.nwords    = addcomp_rbm.nwords

	-- target-word / composition-layer parameters (from the composition RBM)
	dbm.wemb     = addcomp_rbm.wemb
	dbm.wembcomp = addcomp_rbm.wembcomp
	dbm.wembbias = addcomp_rbm.wembbias
	dbm.compbias = avg_compbias

	-- context-word / hidden-layer parameters (from the context RBM)
	dbm.comphid   = ctx_rbm.comphid
	dbm.cwemb     = ctx_rbm.cwemb
	dbm.cwembbias = ctx_rbm.cwembbias
	dbm.cwembhid  = ctx_rbm.cwembhid
	dbm.hidbias   = ctx_rbm.hidbias

	return setmetatable(dbm, AddVeCoSemDBM_mt)
end

-- save to file
-- Serialize the entire model table to a binary torch file at `path`.
function AddVeCoSemDBM:save_to_file( path )
	local file = torch.DiskFile(path, 'w')
	file:binary()
	file:writeObject(self)
	file:close()
end

-- load from a file
-- Deserialize a model from `path` and re-attach the class metatable
-- (methods are looked up via the metatable, which is not serialized).
function AddVeCoSemDBM:load_from_file( path )
	local f = torch.DiskFile(path, 'r')
	f:binary()
	-- `local` added: this previously leaked a global named `model`
	local model = f:readObject()
	f:close()
	setmetatable(model, AddVeCoSemDBM_mt)
	return model
end

-- Stack per-slot word embeddings into one tall matrix.
-- ws: (nslots x datasize) matrix of word ids; wembs: embedding matrix
-- (rows are word embeddings). Output row block i (embdim rows) holds the
-- transposed embeddings of the words in slot i, one column per datum.
function AddVeCoSemDBM:compute_embs( ws, wembs )
	local d = self.embdim
	local nslots, datasize = ws:size(1), ws:size(2)
	local stacked = torch.zeros(d * nslots, datasize)

	for slot = 1, nslots do
		local rows = wembs:index(1, ws[{slot,{}}]:long()):t()
		stacked[{{(slot-1)*d+1, slot*d},{}}]:copy(rows)
	end

	return stacked
end

-- compute hidden unit probs using mean-field update
function AddVeCoSemDBM:mean_field_updates( cws, ws )
	-- Fixed-point mean-field inference for the two latent layers given the
	-- visible words. cws: context-word ids, ws: target-word ids (both one
	-- column per datum). Returns comp_probs, hid_probs.
	local datasize = cws:size(2)

	local cembs = self:compute_embs(cws, self.cwemb)
	local wembs = self:compute_embs(ws, self.wemb)

	-- bottom-up initialization; the doubled word input to the composition
	-- layer compensates for the missing top-down term on the first pass
	local comp_probs = sigmoid(
						(self.wembcomp:t() * wembs * 2 )
						:add(torch.repeatTensor(self.compbias, 1, datasize)))

	local hid_probs = sigmoid(
						(self.comphid:t() * comp_probs)
						:add(self.cwembhid:t() * cembs)
						:add(torch.repeatTensor(self.hidbias, 1, datasize)))

	-- mean-field updates until convergence, at most 100 iterations
	for iter = 1,100 do
		-- `local` added: these previously leaked as globals
		local old_comp_probs = comp_probs
		local old_hid_probs = hid_probs

		comp_probs = sigmoid(
						(self.wembcomp:t() * wembs)
						:add(self.comphid * hid_probs)
						:add(torch.repeatTensor(self.compbias, 1, datasize)))

		hid_probs = sigmoid(
						(self.comphid:t() * comp_probs)
						:add(self.cwembhid:t() * cembs)
						:add(torch.repeatTensor(self.hidbias, 1, datasize)))

		-- stop when the mean absolute change of BOTH layers is tiny.
		-- Fix: each layer is now normalized by its own element count (the
		-- original divided the hidden-layer change by comp_probs:numel(),
		-- skewing the threshold whenever hiddim ~= compdim).
		if (old_comp_probs-comp_probs):abs():sum()/comp_probs:numel() < 1e-6
		and (old_hid_probs-hid_probs):abs():sum()/hid_probs:numel() < 1e-6 then
			break
		end
	end

	return comp_probs, hid_probs
end

-- Resample one word per datum via independent-chain Metropolis-Hastings
-- with a unigram alias-table proposal (avoids a full softmax over the
-- vocabulary). w: current word ids; hidemb: top-down embedding input
-- (embdim x datasize); wemb: embedding matrix; chainlen: MH steps;
-- sampler_storage: { ws = {alias, prob}, w_prob } proposal tables.
-- Returns nil (probabilities are never materialized here -- the nil keeps
-- the (probs, states) pairing of compute_word_probs_states) and the final
-- chain state per datum as a double tensor.
function AddVeCoSemDBM:compute_word_probs_states_mcmc( w, hidemb, wemb,
										chainlen, sampler_storage )
	local datasize = w:size(1)
	local embdim = self.embdim

	-- draw chainlen proposals per datum from the unigram alias sampler
	local sample = gen_alias_sampling(sampler_storage.ws.alias,
					sampler_storage.ws.prob, chainlen*datasize)
					:resize(chainlen, datasize)
	-- each chain starts from the datum's current word
	sample[{1,{}}]:copy(w:long())
	sample:resize(chainlen*datasize)
	-- proposal probability of each sampled word (MH correction factor)
	local prob = sampler_storage.w_prob:index(1,sample)
					:resize(chainlen,datasize)
	local emb = wemb:index(1, sample):resize(chainlen, datasize, embdim)
					:transpose(1,2)
	local id = torch.linspace(1,datasize,datasize):long()
	-- unnormalized model score exp(emb . hidemb) over proposal probability;
	-- NOTE(review): assumes multi_mv computes per-datum batched mat-vec
	-- products over these index tensors -- confirm against its definition
	local ratios = multi_mv(emb, id, hidemb:t(), id):t():exp():cdiv(prob)
	sample:resize(chainlen, datasize)
	-- accept/reject in place along each independent chain
	indep_chain_metropolis_hastings(sample, ratios)
	
	return nil, sample[{-1,{}}]:double()
end

-- Exact word sampling: softmax over all rows of `wemb` given the top-down
-- hidden embeddings, then one roulette-wheel draw per datum (column).
-- Returns the probability matrix and the sampled word indices.
function AddVeCoSemDBM:compute_word_probs_states(hidemb, wemb)
	local scores = wemb * hidemb
	local probs = safe_compute_softmax(scores)
	return probs, roulette_sampling(probs)
end

function AddVeCoSemDBM:compute_vis_probs_states(cws, ws, comp_states, 
								hid_states,	chainlen, sampler_storage )
	-- Resample the visible words given sampled latent states.
	-- Context words use Metropolis-Hastings (full vocabulary); target words
	-- use an exact softmax restricted to self.wlist (the ids with nonzero
	-- unigram probability, set up by :train()).
	-- Returns nil, new_cws, nil, new_ws -- the nil slots keep the
	-- (probs, states) pairing of the per-word sampling helpers.
	local new_cws = torch.zeros(cws:size())
	local new_ws = torch.zeros(ws:size())
	local samplesize = cws:size(2)
	local embdim = self.embdim

	-- sampling ctx words: top-down embedding input, one block per slot
	local hidcembs = (self.cwembhid * hid_states)
					:add(torch.repeatTensor(self.cwembbias,1,samplesize))

	for i = 1,self.nctxwords do
		local hidcemb = hidcembs[{{(i-1)*embdim+1,i*embdim},{}}]
		local _, cw = self:compute_word_probs_states_mcmc(
						cws[{i,{}}],hidcemb,self.cwemb,chainlen, 
						{ws = {alias = sampler_storage.cw_ws.alias,
								prob = sampler_storage.cw_ws.prob },
						w_prob = sampler_storage.cw_prob })
		new_cws[{i,{}}]:copy(cw)
	end

	-- sampling words: exact softmax over the restricted word list
	local hidwembs = (self.wembcomp * comp_states)
					:add(torch.repeatTensor(self.wembbias, 1, samplesize))
	local wembs = self.wemb:index(1, self.wlist)

	for i = 1,self.nwords do
		local hidwemb = hidwembs[{{(i-1)*embdim+1,i*embdim},{}}]
		local _, states = self:compute_word_probs_states(hidwemb, wembs)
		-- map positions within wlist back to vocabulary ids
		new_ws[{i,{}}]:copy(self.wlist:index(1, states))
	end

	-- Fix: explicit nil placeholders. The original `return _, new_cws, _,
	-- new_ws` returned the GLOBAL `_`, whose value depends on whatever an
	-- unrelated `_ , ... =` assignment last stored in it.
	return nil, new_cws, nil, new_ws
end

-- sampling from model
-- Advance the persistent fantasy particles by `niters` Gibbs sweeps.
-- Each sweep resamples the composition units, then the hidden units, then
-- the visible (context + target) words. `samples` is mutated in place and
-- also returned.
function AddVeCoSemDBM:sample_from_model( samples , sampler_storage, 
									niters , chainlen ) 
	local samplesize = samples.samplesize

	for sweep = 1, niters do
		-- comp layer: bottom-up word input plus top-down hidden input
		local wembs = self:compute_embs(samples.ws, self.wemb)
		local comp_in = self.wembcomp:t() * wembs
		comp_in:add(self.comphid * samples.hid_states)
		comp_in:add(torch.repeatTensor(self.compbias, 1, samplesize))
		samples.comp_probs = sigmoid(comp_in)
		samples.comp_states = sample_bernoulli(samples.comp_probs):double()

		-- hidden layer: comp input plus context-word input
		local cembs = self:compute_embs(samples.cws, self.cwemb)
		local hid_in = self.comphid:t() * samples.comp_states
		hid_in:add(self.cwembhid:t() * cembs)
		hid_in:add(torch.repeatTensor(self.hidbias, 1, samplesize))
		samples.hid_probs = sigmoid(hid_in)
		samples.hid_states = sample_bernoulli(samples.hid_probs):double()

		-- visible words given the freshly sampled latent states
		local _, new_cws, _, new_ws = self:compute_vis_probs_states(
									samples.cws, samples.ws,
									samples.comp_states, samples.hid_states,
									chainlen, sampler_storage)
		samples.cws, samples.ws = new_cws, new_ws
	end

	return samples
end

-- Estimate the comp/hidden marginals by Gibbs sampling: run `nmarkovs`
-- steps alternating comp | hid with the visible words held fixed, then
-- average the sampled binary states from step `fireiter` (burn-in cutoff,
-- default 100) onward. cws/ws: context / target word index matrices, one
-- column per datum. Returns (mean comp states, mean hid states), each of
-- size (layerdim x datasize).
function AddVeCoSemDBM:compute_comp_hid_probs_gibbs(cws, ws,
												nmarkovs, fireiter) 

	local datasize = cws:size(2)
	local fireiter = fireiter or 100
	local embdim = self.embdim
	local hiddim = self.hiddim
	-- NOTE(review): comp size read from compbias here, unlike the other
	-- methods which use self.compdim -- presumably equal; confirm
	local compdim = self.compbias:size(1)
	local nctxwords = self.nctxwords

	-- full state history is kept so the post-burn-in mean can be taken
	local comp_states = torch.zeros(nmarkovs, compdim, datasize)
	local hid_states = torch.zeros(nmarkovs, hiddim, datasize)
	
	local wembs = self:compute_embs(ws, self.wemb)
 	local cembs = self:compute_embs(cws, self.cwemb) 

	-- step 1: bottom-up only (no hidden state exists yet)
	local comp_probs = sigmoid(
					(self.wembcomp:t() * wembs)
					:add(torch.repeatTensor(self.compbias,1,datasize)))
	comp_states[{1,{},{}}]:copy(sample_bernoulli(comp_probs):double())

	local hid_probs = sigmoid(
					(self.comphid:t() * comp_states[{1,{},{}}])
					:add(self.cwembhid:t() * cembs)
					:add(torch.repeatTensor(self.hidbias,1,datasize)))
	hid_states[{1,{},{}}]:copy(sample_bernoulli(hid_probs):double())

	-- Gibbs sampling: each step conditions on the previous hidden sample
	for i = 2,nmarkovs do
		-- sampling comp given words (fixed) and previous hidden states
		comp_probs = sigmoid(
						(self.wembcomp:t() * wembs)
						:add(self.comphid * hid_states[{i-1,{},{}}])
						:add(torch.repeatTensor(self.compbias,1,datasize)))
		comp_states[{i,{},{}}]:copy(sample_bernoulli(comp_probs):double())

		-- sampling hidden given the fresh comp states and context words
		hid_probs = sigmoid(
						(self.comphid:t() * comp_states[{i,{},{}}])
						:add(self.cwembhid:t() * cembs)
						:add(torch.repeatTensor(self.hidbias,1,datasize)))
		hid_states[{i,{},{}}]:copy(sample_bernoulli(hid_probs):double())

	end

	-- post-burn-in averages (requires nmarkovs >= fireiter)
	return 
		comp_states[{{fireiter,-1},{},{}}]:mean(1)[{1,{},{}}],
		hid_states[{{fireiter,-1},{},{}}]:mean(1)[{1,{},{}}]
end


-- compute the change in params, using 1-step Contrastive Divergence (CD1)
-- data is a matrix which each column is a datum point
function AddVeCoSemDBM:compute_grads( data, samples, chainlen, 
									sampler_storage , mc_niters )
	-- Compute parameter gradients as (data-dependent statistics estimated
	-- by mean-field) minus (model statistics estimated from the persistent
	-- Gibbs chains in `samples`, which are advanced in place by mc_niters
	-- sweeps). Dense deltas are tensors; delta_wemb / delta_cwemb are
	-- sparse tables keyed by word id (only ids seen in the batch or in the
	-- fantasy particles get an entry).
	local embdim = self.embdim
	local datasize = data:size(2)
	local samplesize = samples.samplesize
	local nctxwords = self.nctxwords

	--============ pos & neg phases ==========--
	-- row 1 of `data` is the target word; the remaining rows are context
	local pos_cws = data[{{2,-1},{}}]
	local pos_ws = data[{{1},{}}]
	-- `local` added: these previously leaked as globals
	local pos_comp_probs, pos_hid_probs = 
							self:mean_field_updates(pos_cws, pos_ws)
	self:sample_from_model(samples, sampler_storage, mc_niters, chainlen)

	--============ compute grads ==========--
	local neg_ws = samples.ws
	local neg_cws = samples.cws
	local pos_comp = pos_comp_probs
	local neg_comp = samples.comp_probs
	local pos_h = pos_hid_probs
	local neg_h = samples.hid_probs

	local pos_cembs = self:compute_embs(pos_cws, self.cwemb)
	local neg_cembs = self:compute_embs(neg_cws, self.cwemb)

	-- context-embedding <-> hidden weights
	local delta_cwembhid = 	(pos_cembs*pos_h:t()):div(datasize) - 
							(neg_cembs*neg_h:t()):div(samplesize)

	-- compute delta cwemb: sparse per-id accumulation of the top-down
	-- input for every (slot, datum) occurrence of each context word id
	local delta_cwemb = {}
	local pos_hidcwemb = (self.cwembhid * pos_h)
						:add(torch.repeatTensor(self.cwembbias, 1, datasize))
						:div(datasize)
	local neg_hidcwemb = (self.cwembhid * neg_h)
						:add(torch.repeatTensor(self.cwembbias,1,samplesize))
						:div(samplesize)
	for j = 1,datasize do
		local temp0 = pos_hidcwemb[{{},{j}}]:t()
		for i = 1,nctxwords do
			local id0 = pos_cws[{i,j}]
			local range = {{},{(i-1)*embdim+1, i*embdim}}
			if delta_cwemb[id0] == nil then 
				delta_cwemb[id0] = torch.zeros(embdim)
			end 
			delta_cwemb[id0]:add(temp0[range])
		end
	end

	for j = 1,samplesize do
		local temp1 = neg_hidcwemb[{{},{j}}]:t()
		for i = 1,nctxwords do
			local id1 = neg_cws[{i,j}]
			local range = {{},{(i-1)*embdim+1, i*embdim}}
			if delta_cwemb[id1] == nil then 
				delta_cwemb[id1] = torch.zeros(embdim)
			end
			-- negative-phase contribution is subtracted
			delta_cwemb[id1]:add(-temp1[range])
		end
	end

	-- compute delta comphid
	local delta_comphid = 	(pos_comp * pos_h:t()):div(datasize) - 
							(neg_comp * neg_h:t()):div(samplesize)

	-- compute delta biases (context / hidden / comp)
	local delta_cwembbias = pos_cembs:sum(2):div(datasize) - 
							neg_cembs:sum(2):div(samplesize)
	local delta_hidbias = 	pos_h:sum(2):div(datasize) - 
							neg_h:sum(2):div(samplesize)
	local delta_compbias = 	pos_comp:sum(2):div(datasize) - 
							neg_comp:sum(2):div(samplesize)

	-- compute deltas for target words
	local pos_wembs = self:compute_embs(pos_ws, self.wemb)
	local neg_wembs = self:compute_embs(neg_ws, self.wemb)

	local delta_wembcomp = 	(pos_wembs * pos_comp:t()):div(datasize) -
							(neg_wembs * neg_comp:t()):div(samplesize)

	-- compute delta wemb: sparse accumulation, same scheme as delta_cwemb
	local delta_wemb = {}
	local pos_compwemb = (self.wembcomp * pos_comp)
						:add(torch.repeatTensor(self.wembbias, 1, datasize))
						:div(datasize)
	local neg_compwemb = (self.wembcomp * neg_comp)
						:add(torch.repeatTensor(self.wembbias,1,samplesize))
						:div(samplesize)

	for j = 1, datasize do
		local temp0 = pos_compwemb[{{},{j}}]:t()
		for i = 1,self.nwords do
			local id0 = pos_ws[{i,j}]
			local range = {{},{(i-1)*embdim+1,i*embdim}}
			if delta_wemb[id0]==nil then 
				delta_wemb[id0] = torch.zeros(embdim) 
			end
			delta_wemb[id0]:add(temp0[range])
		end
	end

	for j = 1, samplesize do
		local temp1 = neg_compwemb[{{},{j}}]:t()
		for i = 1,self.nwords do
			local id1 = neg_ws[{i,j}]
			local range = {{},{(i-1)*embdim+1,i*embdim}}
			if delta_wemb[id1]==nil then 
				delta_wemb[id1] = torch.zeros(embdim) 
			end
			delta_wemb[id1]:add(-temp1[range])
		end
	end

	-- compute delta word-embedding bias
	local delta_wembbias = 	pos_wembs:sum(2):div(datasize) - 
							neg_wembs:sum(2):div(samplesize)

	-- periodic diagnostics: mean activations and weight sums.
	-- `batch` and `nbatch_to_show` are file-level globals driven by :train()
	if math.mod(batch, nbatch_to_show) == 0 then
		print('--------------')
		print(pos_h:sum() / pos_h:numel())
		print(neg_h:sum() / neg_h:numel())
		
		print(pos_comp:sum() / pos_comp:numel())
		print(neg_comp:sum() / neg_comp:numel())
		
		print(self.cwembhid:sum())
		print(self.comphid:sum())
	end

	return 	delta_wemb, delta_wembcomp, delta_wembbias,
			delta_compbias, delta_comphid,
			delta_cwemb, delta_cwembbias, delta_cwembhid,
			delta_hidbias
end

function AddVeCoSemDBM:reconstruct_data( data, sampler_storage, chainlen)
	-- Reconstruct a minibatch: infer latent probabilities via mean-field,
	-- sample binary states, then resample the visible words from them.
	-- Returns (rec_cws, rec_ws, err) where err[i] is the fraction of data
	-- points whose i-th word slot was reconstructed incorrectly.
	local datasize = data:size(2)
	local nctxwords = self.nctxwords

	-- row 1 holds the target word; the remaining rows are context words
	local cws = data[{{2,-1},{}}]:clone()
	local ws = data[{{1},{}}]:clone() 
	local comp_probs, hid_probs = self:mean_field_updates(cws, ws)
	local comp_states = sample_bernoulli(comp_probs):double()
	local hid_states = sample_bernoulli(hid_probs):double()

	-- sampling visible units
	-- (`local` added: rec_cws, rec_ws, and `_` previously leaked as globals)
	local _, rec_cws, _, rec_ws = self:compute_vis_probs_states(cws, ws, 
										comp_states, hid_states,
										chainlen, sampler_storage)

	-- per-slot mismatch rate across the batch
	local err = torch.zeros(data:size(1))
	for i = 1,nctxwords do
		err[{i}] = torch.ne(cws[{i,{}}], rec_cws[{i,{}}]):double()
					:sum() / datasize
	end
	for i = 1,self.nwords do
		err[{nctxwords+i}] = torch.ne(ws[{i,{}}], rec_ws[{i,{}}]):double()
						:sum() / datasize
	end

	return rec_cws, rec_ws, err
end

-- Create the initial fantasy particles for `nmarkovs` persistent chains:
-- visible words drawn from the unigram alias samplers, latent states drawn
-- from the mean-field posterior given those words.
function AddVeCoSemDBM:init_samples( nmarkovs , sampler_storage )
	local fantasy = {
		samplesize = nmarkovs,
		cws = torch.zeros(self.nctxwords, nmarkovs),
		ws = torch.zeros(self.nwords, nmarkovs),
	}

	-- context slots all share one unigram sampler
	for slot = 1, self.nctxwords do
		local draw = gen_alias_sampling(sampler_storage.cw_ws.alias,
										sampler_storage.cw_ws.prob, nmarkovs)
		fantasy.cws[{slot,{}}]:copy(draw)
	end
	-- each target-word slot has its own sampler
	for slot = 1, self.nwords do
		local draw = gen_alias_sampling(sampler_storage.w_ws[slot].alias,
										sampler_storage.w_ws[slot].prob,
										nmarkovs)
		fantasy.ws[{slot,{}}]:copy(draw)
	end

	fantasy.comp_probs, fantasy.hid_probs =
			self:mean_field_updates(fantasy.cws, fantasy.ws)
	fantasy.comp_states = sample_bernoulli(fantasy.comp_probs):double()
	fantasy.hid_states = sample_bernoulli(fantasy.hid_probs):double()

	return fantasy
end

-- File-level globals shared between :train() (which writes them) and
-- :compute_grads() (which reads them to decide when to print diagnostics).
batch = 0
nbatch_to_show = 20
-- training
-- data is a matrix, each column is a visible data point
function AddVeCoSemDBM:train( data, nepoch, batchsize, init_momentum, 
							final_momentum, eps, weightcost, 
							chainlen, nmarkovs, sampler_storage )
	-- Stochastic approximation training: positive statistics from
	-- mean-field on each minibatch, negative statistics from persistent
	-- Gibbs chains, momentum + weight-decay updates. Checkpoints and
	-- reconstruction errors are printed every `nbatch_to_show` batches.
	local datasize = data:size(2)
	local mc_niters = 5  -- Gibbs sweeps on the chains per parameter update

	-- momentum accumulators, one per parameter tensor
	-- (`local` added: these previously leaked into the global environment)
	local inc_wemb		= torch.zeros(self.wemb:size())
	local inc_wembcomp	= torch.zeros(self.wembcomp:size())
	local inc_wembbias	= torch.zeros(self.wembbias:size())
	local inc_comphid	= torch.zeros(self.comphid:size())
	local inc_compbias	= torch.zeros(self.compbias:size())
	local inc_cwemb		= torch.zeros(self.cwemb:size())
	local inc_cwembhid	= torch.zeros(self.cwembhid:size())
	local inc_cwembbias	= torch.zeros(self.cwembbias:size())
	local inc_hidbias	= torch.zeros(self.hidbias:size())
	
	local momentum = init_momentum
	local samples = self:init_samples(nmarkovs, sampler_storage)

	-- target word list: vocabulary ids with nonzero unigram probability;
	-- used by compute_vis_probs_states to restrict the softmax
	local ids = sampler_storage.w_prob[1]:gt(0):int()
	local ws = {}
	for i = 1,ids:numel() do
		if ids[{i}] == 1 then ws[#ws+1] = i end
	end
	self.wlist = torch.LongTensor(ws)

	for epoch = 1,nepoch do
		print('=== epoch ' ..  epoch .. '  ===')

		-- switch to the final momentum after a short warm-up
		if epoch > 3 then
			momentum = final_momentum
		end

		batch = 0  -- deliberately global: read by compute_grads for logging
		local total_err = torch.zeros(data:size(1))

		while true do
			-- extract a batch (the last one may be short)
			batch = batch + 1
			local startid_batch = (batch-1) * batchsize + 1
			local endid_batch = math.min(batch*batchsize, datasize)
			local batchdata = data[{{}, {startid_batch,endid_batch}}]
			
			-- gradients (`local` added: the deltas previously leaked as
			-- globals)
			local delta_wemb, delta_wembcomp, delta_wembbias,
				delta_compbias, delta_comphid,
				delta_cwemb, delta_cwembbias, delta_cwembhid,
				delta_hidbias
				= self:compute_grads(batchdata, samples, chainlen, 
									sampler_storage, mc_niters )

			-- momentum update; weight matrices also get L2 weight decay
			inc_wembcomp	:mul(momentum):add(delta_wembcomp * eps)
								:add(self.wembcomp * (-eps * weightcost))
			inc_wembbias	:mul(momentum):add(delta_wembbias * eps)
			inc_comphid		:mul(momentum):add(delta_comphid * eps)
								:add(self.comphid * (-eps * weightcost))
			inc_compbias	:mul(momentum):add(delta_compbias * eps)
			inc_cwembhid	:mul(momentum):add(delta_cwembhid * eps)
								:add(self.cwembhid * (-eps * weightcost))
			inc_cwembbias	:mul(momentum):add(delta_cwembbias * eps)
			inc_hidbias		:mul(momentum):add(delta_hidbias * eps)
			
			-- embeddings receive sparse updates: only rows whose word id
			-- appeared in the batch or the fantasy particles
			inc_cwemb		:mul(momentum)
			inc_wemb		:mul(momentum)
			for i, delta in pairs(delta_cwemb) do
				inc_cwemb[{i,{}}]:add(delta * eps)
			end
			for i, delta in pairs(delta_wemb) do
				inc_wemb[{i,{}}]:add(delta * eps)
			end

			self.wembcomp		:add(inc_wembcomp)
			self.wembbias		:add(inc_wembbias)
			self.comphid		:add(inc_comphid)
			self.compbias		:add(inc_compbias)
			self.cwemb			:add(inc_cwemb)
			self.wemb			:add(inc_wemb)
			self.cwembhid		:add(inc_cwembhid)
			self.cwembbias		:add(inc_cwembbias)
			self.hidbias		:add(inc_hidbias)

			collectgarbage()
			-- `local` added (err leaked as a global); the original also
			-- passed a spurious 4th argument (an undefined `mf_niters`)
			local _, _, err = self:reconstruct_data(batchdata, 
											sampler_storage, chainlen)
			total_err:add(err)

			-- periodic checkpoint + running mean reconstruction error
			if math.mod(batch, nbatch_to_show) == 0 then
				print('batch ' .. batch)
				self:save_to_file('vecosem_model/' .. epoch .. '.model')
				total_err:div(nbatch_to_show)
				print(total_err)
				total_err:fill(0)

				io.flush()
			end
	
			if endid_batch == datasize then
				break
			end
		end
	end
end
