-- An implementation of a restricted Boltzmann machine.
-- Currently supports only binary input and binary compden units.

require 'xlua'
p = xlua.Profiler()

require 'cutils'
require 'utils'

AddCompRBM = {}                              -- class table (global namespace for the methods below)
AddCompRBM_mt = { __index = AddCompRBM }     -- metatable attached to every model instance

-- construction
function AddCompRBM:new( wemb, compdim, nwords, wlist, mult )
	-- Create a new model.
	-- wemb    : vocab x embdim word-embedding matrix (shared; not updated by train)
	-- compdim : number of compden units
	-- nwords  : number of word slots per datum
	-- wlist   : word list, kept on the model for reference
	-- mult    : optional init scale for the weights (default 0.01).
	--           Previously `local mult = mult or 0.01` read an undeclared name,
	--           so the scale was silently always 0.01; it is now a real parameter.
	local mult = mult or 0.01
	local embdim = wemb:size(2)

	-- small random weights, zero biases (randn(..):mul(0) was just zeros)
	local wembcomp = torch.randn(nwords*embdim, compdim):mul(mult)
	local compbias = torch.zeros(compdim, 1)
	local wembbias = torch.zeros(nwords*embdim, 1)

	local model = {
			embdim 		= embdim,
			compdim 	= compdim,
			nwords 		= nwords,
			wlist		= wlist,

			wemb 		= wemb,
			wembcomp 	= wembcomp,
			wembbias 	= wembbias,
			compbias 	= compbias
		}

	setmetatable(model, AddCompRBM_mt)
	return model
end

-- save to file
function AddCompRBM:save_to_file( path )
	-- Serialize the entire model table to a binary torch file at `path`.
	local out = torch.DiskFile(path, 'w')
	out:binary()
	out:writeObject(self)
	out:close()
end

-- load from a file
-- Deserialize a model previously written by save_to_file and re-attach
-- the metatable (readObject restores a plain table without it).
function AddCompRBM:load_from_file( path )
	local f = torch.DiskFile(path, 'r')
	f:binary()
	-- bug fix: `model` was an accidental global, clobbering any global of
	-- the same name and leaking state between loads; it is now local.
	local model = f:readObject()
	f:close()
	setmetatable(model, AddCompRBM_mt)
	return model
end

function AddCompRBM:compute_embs( ws, wembs )
	-- Stack the embedding of every word slot of each datum into one tall column.
	-- ws    : nwords x datasize matrix of word indices
	-- wembs : vocab x embdim embedding matrix
	-- returns an (nwords*embdim) x datasize matrix
	local embdim   = self.embdim
	local nwords   = ws:size(1)
	local datasize = ws:size(2)

	local stacked = torch.zeros(embdim * nwords, datasize)
	for slot = 1,nwords do
		local rows = {(slot-1)*embdim+1, slot*embdim}
		stacked[{rows,{}}]:copy(wembs:index(1, ws[{slot,{}}]:long()):t())
	end

	return stacked
end

-- sample compden units from the visible word layer
-- ws is a matrix of word indices; each column is a visible datum point
function AddCompRBM:compute_comp_probs_states( ws )
	-- Compute the compden-unit activation probabilities for each datum and
	-- draw binary states from them.
	-- ws : nwords x datasize matrix of word indices (columns are data points)
	-- returns probs (compdim x datasize) and sampled 0/1 states as doubles
	local datasize = ws:size(2)
	local embs = self:compute_embs(ws, self.wemb)

	-- activation = 2 * W' * e + bias (the factor of 2 matches the original
	-- formulation of this model)
	local act = self.wembcomp:t() * embs
	act:mul(2)
	act:add(torch.repeatTensor(self.compbias, 1, datasize))

	local probs = sigmoid(act)
	local states = sample_bernoulli(probs):double()
	return probs, states
end

-- resample one visible word slot given its top-down input from the compden units
-- w is a vector holding the current word index of this slot for each datum
function AddCompRBM:compute_word_probs_states( w, compemb, wemb, 
											chainlen, sampler_storage)
	-- Resample one word slot for every datum with an independence-chain
	-- Metropolis-Hastings sampler; proposals come from alias sampling over
	-- the distribution held in sampler_storage.
	-- w               : current word indices of this slot (datasize vector)
	-- compemb         : embdim x datasize top-down input for this slot
	-- wemb            : vocab x embdim word-embedding matrix
	-- chainlen        : number of MH steps per datum
	-- sampler_storage : { ws = {alias, prob}, w_prob } alias-table state
	-- returns nil (exact probs are not computed) and the final chain state
	-- of every datum, cast to double
	local datasize = w:size(1)
	local embdim = self.embdim

	-- draw chainlen proposals per datum in one shot, one chain per column
	local sample = gen_alias_sampling(sampler_storage.ws.alias,
					sampler_storage.ws.prob, chainlen*datasize)
					:resize(chainlen, datasize)
	-- every chain starts from the datum's current word
	sample[{1,{}}]:copy(w:long())
	sample:resize(chainlen*datasize)
	-- proposal probability of each sampled word (same chainlen x datasize layout)
	local prob = sampler_storage.w_prob:index(1,sample)
					:resize(chainlen,datasize)
	-- embeddings of the sampled words, rearranged to datasize x chainlen x embdim
	local emb = wemb:index(1, sample):resize(chainlen, datasize, embdim)
					:transpose(1,2)
	local id = torch.linspace(1,datasize,datasize):long()
	-- acceptance ratios: exp(model score) / proposal probability
	-- NOTE(review): multi_mv is assumed to batch-multiply each datum's
	-- embedding block with its compemb column — confirm against cutils
	local ratios = multi_mv(emb, id, compemb:t(), id):t():exp():cdiv(prob)
	sample:resize(chainlen, datasize)
	indep_chain_metropolis_hastings(sample, ratios)
	
	return nil, sample[{-1,{}}]:double()
end

function AddCompRBM:compute_words_probs_states( ws, comp_states, 
												chainlen , sampler_storage )
	-- Resample every word slot given the compden states (one negative-phase
	-- step of CD): each slot gets its own MH chain driven by the slice of
	-- the top-down input that belongs to it.
	-- ws          : nwords x datasize matrix of current word indices
	-- comp_states : compdim x datasize binary compden states
	-- returns nil (probs not computed) and the resampled word matrix
	local embdim = self.embdim
	local datasize = comp_states:size(2)	
	local new_ws = torch.Tensor(ws:size())

	-- top-down input to the stacked word embeddings: W*h + bias
	local compembs = (self.wembcomp * comp_states)
					:add(torch.repeatTensor(self.wembbias, 1, datasize))
	
	for i = 1,self.nwords do
		-- the embdim rows of the stacked input that belong to word slot i
		local compemb = compembs[{{(i-1)*embdim+1,i*embdim},{}}]
		-- per-slot alias tables and probabilities are repackaged into the
		-- shape compute_word_probs_states expects
		local _, new_w = self:compute_word_probs_states( 
						ws[{i,{}}],	compemb, self.wemb, chainlen,
						{ ws = { alias = sampler_storage.w_ws[i].alias,
								prob = sampler_storage.w_ws[i].prob },
						w_prob = sampler_storage.w_prob[i] } )
		new_ws[{i,{}}]:copy(new_w)
	end

	return nil, new_ws
end

-- compute the change in params using n-step Contrastive Divergence (CD-n, n=1 by default)
-- data is a matrix in which each column is a datum point
function AddCompRBM:compute_grads(data, chainlen, sampler_storage, cd_niters)
	-- Run CD-n and return parameter deltas (already averaged over the batch):
	--   delta_wemb     : sparse table {word_id -> embdim vector}
	--   delta_wembcomp : dense, same shape as self.wembcomp
	--   delta_wembbias, delta_compbias : dense bias deltas
	--   err            : per-slot reconstruction error (fraction of words changed)
	local cd_niters = cd_niters or 1
	local datasize = data:size(2)
	local embdim = self.embdim
	local compdim = self.compdim 

	--=============== cd-n ==============--
	-- positive phase: compden probs/states driven by the data
	local pos_ws= data
	local pos_comp_probs, pos_comp_states 
						= self:compute_comp_probs_states(pos_ws)

	-- negative phase: alternate word resampling and compden resampling
	local neg_comp_probs = pos_comp_probs
	local neg_comp_states = pos_comp_states
	local neg_ws = pos_ws
	
	for iter = 1, cd_niters do
		_,neg_ws = self:compute_words_probs_states(neg_ws, neg_comp_states, 
												chainlen, sampler_storage)
		neg_comp_probs, neg_comp_states 
						= self:compute_comp_probs_states(neg_ws)
	end

	--================ compute grads ================--
	-- probabilities (not binary states) are used on both sides, the usual
	-- low-variance CD estimator
	local pos_h = pos_comp_probs
	local neg_h = neg_comp_probs
	local pos_embs = self:compute_embs(pos_ws, self.wemb)
	local neg_embs = self:compute_embs(neg_ws, self.wemb)

	-- <e h'>_data - <e h'>_model, averaged over the batch
	local delta_wembcomp = (pos_embs * pos_h:t() - neg_embs * neg_h:t())
							:div(datasize)

	-- compute delta emb: accumulated sparsely, only for word ids that occur
	local delta_wemb = {}
	local pos_compwemb = (self.wembcomp * pos_h)
						:add(torch.repeatTensor(self.wembbias, 1, datasize))
	local neg_compwemb = (self.wembcomp * neg_h)
						:add(torch.repeatTensor(self.wembbias, 1, datasize))
	for j = 1, datasize do
		local temp0 = pos_compwemb[{{},{j}}]:t()
		local temp1 = neg_compwemb[{{},{j}}]:t()

		for i = 1,self.nwords do
			local id0 = pos_ws[{i,j}]
			local id1 = neg_ws[{i,j}]
			-- NOTE(review): temp0[range] is a 1 x embdim slice added into a
			-- 1-D zeros(embdim) tensor; relies on torch accepting the
			-- same-numel add — confirm on the torch version in use
			local range = {{},{(i-1)*embdim+1, i*embdim}}

			if delta_wemb[id0]==nil then 
				delta_wemb[id0] = torch.zeros(embdim) 
			end
			delta_wemb[id0]:add(temp0[range])
		
			if delta_wemb[id1]==nil then 
				delta_wemb[id1] = torch.zeros(embdim) 
			end
			delta_wemb[id1]:add(-temp1[range])
		end
	end

	for _,delta in pairs(delta_wemb) do
		delta:div(datasize )
	end

	-- compute delta biases
	local delta_wembbias = (pos_embs - neg_embs):sum(2):div(datasize)
	local delta_compbias = (pos_h - neg_h):sum(2):div(datasize)

	-- compute reconstruction error: fraction of words that changed per slot
	local err = torch.zeros(self.nwords)
	for i = 1,self.nwords do
		err[{i}] = torch.ne(pos_ws[{i,{}}], neg_ws[{i,{}}]):double():sum()
	end
	err:div(datasize)


	-- periodic diagnostics: mean compden activation, positive vs negative.
	-- `batch` and `nbatch_to_show` are globals maintained by train()
	if math.mod(batch, nbatch_to_show) == 0 then
		print('--------------')
		print(pos_comp_states:sum() / pos_comp_states:numel())
		print(neg_comp_states:sum() / neg_comp_states:numel())
	end

	return delta_wemb, delta_wembcomp, delta_wembbias, delta_compbias, err
end

-- globals shared between train() and compute_grads(): `batch` counts
-- minibatches within the current epoch; diagnostics/saving fire every
-- `nbatch_to_show` batches
batch = 0
nbatch_to_show = 100

-- training
-- data is a matrix, each column is a visible data point
function AddCompRBM:train(data, nepoch, batchsize, init_momentum, 
				final_momentum, eps, weightcost, chainlen, sampler_storage )
	-- Fit the model with momentum SGD on CD gradients.
	-- data            : matrix whose columns are visible data points
	-- nepoch          : number of passes over the data
	-- batchsize       : minibatch width (columns per batch)
	-- init/final_momentum : momentum before/after the warm-up epochs
	-- eps             : learning rate
	-- weightcost      : L2 penalty applied to wembcomp only
	-- chainlen, sampler_storage : forwarded to compute_grads
	local datasize = data:size(2)	
	local inc_wemb 		= torch.zeros(self.wemb:size())
	local inc_wembcomp 	= torch.zeros(self.wembcomp:size())
	local inc_wembbias	= torch.zeros(self.wembbias:size())
	local inc_compbias	= torch.zeros(self.compbias:size())

	local momentum = init_momentum
	
	for epoch = 1,nepoch do
		print('=== epoch ' ..  epoch .. '  ===')

		-- switch to the final momentum after three warm-up epochs
		if epoch > 3 then
			momentum = final_momentum
		end	

		-- `batch` is a global on purpose: compute_grads reads it for its
		-- logging cadence
		batch = 0
		-- per-slot reconstruction error, accumulated between printouts
		-- (a redundant shadowed `local total_err = 0` was removed)
		local total_err = torch.zeros(data:size(1))
		while true do
			-- extract a batch
			batch = batch + 1
			local startid_batch = (batch-1) * batchsize + 1
			local endid_batch = math.min(batch*batchsize, datasize)
			local batchdata = data[{{}, {startid_batch,endid_batch}}]
			
			-- bug fix: these were accidental globals, leaking between calls
			local delta_wemb, delta_wembcomp, delta_wembbias, delta_compbias, err
					= self:compute_grads(batchdata,chainlen,sampler_storage)

			-- momentum update; only word ids that occurred get a wemb delta
			inc_wemb:mul(momentum)
			for i,delta in pairs(delta_wemb) do
				inc_wemb[{i,{}}]:add(delta*eps)
			end

			inc_wembcomp	:mul(momentum):add(delta_wembcomp * eps)
									:add(self.wembcomp * (-eps * weightcost))
			inc_wembbias	:mul(momentum):add(delta_wembbias * eps)
			inc_compbias	:mul(momentum):add(delta_compbias * eps)

			--self.wemb		:add(inc_wemb)	-- word embeddings deliberately frozen
			self.wembcomp	:add(inc_wembcomp)
			self.wembbias	:add(inc_wembbias)
			self.compbias	:add(inc_compbias)
	
			collectgarbage()
			total_err:add(err)

			-- periodic checkpoint + diagnostics
			if math.mod(batch, nbatch_to_show) == 0 then
				print('batch ' .. batch)
				self:save_to_file('comp_model/' .. epoch .. '.model')
				print(total_err:div(nbatch_to_show))
				total_err:fill(0)

				print('--------------')
				print(self.wembbias:sum())
				print(self.wembcomp:sum())
				print(self.compbias:sum())
				print(batchdata)

				io.flush()
			end
		
			if endid_batch == datasize then
				break
			end
		end
	end
end
