require 'dict'
require 'utils'
require 'add_vecosem_dbm'
require 'add_comp_rbm'

function load_gold()
	-- Load the evaluation contexts and gold rankings from disk.
	-- Reads the globals ctx_data_path and gold_path; requires the global
	-- `dic` (word <-> id dictionary) to be loaded first.
	-- Returns an array of cases, each holding:
	--   target    : word id of the target word
	--   context   : cloned tensor of context-word ids (rows 2..end of the
	--               stored column for this case)
	--   gold_rank : {name = array of substitute ids, weight = gold weights}
	--   cand_list : candidate pool shared by all cases of the same target
	--               word (NOTE: the same table is referenced by every such
	--               case and keeps growing as later lines are read)

	-- load context matrix (one column per case)
	local f = torch.DiskFile(ctx_data_path, 'r')
	f:binary()
	context = f:readObject()
	f:close()

	-- load gold 
	local cases = {}
	local pre_word_id = -1
	local cand_list = { name = {} , number = 0 }

	local iter = 0
	for line in io.lines(gold_path) do
		iter = iter + 1
		--if iter > 200 then break end

		local comps = split_string(line)
		local word_id = dic.word2id[comps[1]]
		local case_id = tonumber(comps[2])
		local ctx = context[{{2,-1},case_id}]:clone()

		-- start a fresh candidate pool whenever the target word changes
		if word_id ~= pre_word_id then
			cand_list = { name = {} , number = 0 }
			pre_word_id = word_id
		end

		-- extract gold rank: comps[3],comps[5],... are substitute words,
		-- comps[4],comps[6],... their gold weights
		local name = {}
		-- fixed sizing: there are (#comps-2)/2 substitute/weight pairs;
		-- the old torch.zeros(#comps / 2) left one spurious trailing zero
		-- (harmless for GAP because zeros are masked, but incorrect)
		local n_subs = (#comps - 2) / 2
		local weight = torch.zeros(n_subs)

		for i = 3,#comps,2 do
			local id = (i-1) / 2
			name[id] = dic.word2id[comps[i]] 
			if cand_list.name[name[id]] == nil then 
				cand_list.name[name[id]] = 1
				cand_list.number = cand_list.number + 1
			end
			
			weight[{id}] = tonumber(comps[i+1])
		end

		if #name > 0 then
			cases[#cases+1] = { 
						target = word_id,
						context = ctx,
						gold_rank = {name = name, weight = weight}, 
						cand_list = cand_list }
		end
	end

	return cases
end

function compute_gap( gold_rank, cand_rank )
	-- Generalized Average Precision (GAP) of a candidate ranking against
	-- the gold ranking.
	-- gold_rank: {name = substitute ids in gold order, weight = gold weights}
	-- cand_rank: {name = substitute ids in predicted order}; as a side
	--            effect a .weight tensor (each candidate's gold weight, 0
	--            if absent from the gold list) is stored on cand_rank.
	-- Returns the scalar GAP score.

	-- map gold id -> gold weight; on a duplicated gold id the later entry
	-- wins, matching the original O(n*m) nested-loop behavior
	local gold_weight_of = {}
	for j,ma in ipairs(gold_rank.name) do
		gold_weight_of[ma] = gold_rank.weight[j]
	end

	cand_rank.weight = torch.zeros(#cand_rank.name)
	for i,na in ipairs(cand_rank.name) do
		local w = gold_weight_of[na]
		if w ~= nil then
			cand_rank.weight[i] = w
		end
	end

	local a = gold_rank.weight
	local b = cand_rank.weight
	-- indicator masks of strictly positive weights
	local Ia = torch.gt(a, 0):double()
	local Ib = torch.gt(b, 0):double()
	-- running means: a_bar[k] = mean(a[1..k]), likewise b_bar
	local a_bar = 	torch.cumsum(a, 1)
					:cdiv(torch.linspace(1,a:numel(),a:numel())) 
	local b_bar = 	torch.cumsum(b, 1)
					:cdiv(torch.linspace(1,b:numel(),b:numel())) 

	local gap = torch.cmul(Ib,b_bar):sum() / torch.cmul(Ia,a_bar):sum() 
	return gap
end

function compute_avg_gap( cases )
	-- Mean GAP over all cases; assumes eval() already filled in
	-- case.cand_rank for every case.
	local total = 0
	local n_cases = #cases
	for i = 1, n_cases do
		local case = cases[i]
		total = total + compute_gap(case.gold_rank, case.cand_rank)
	end
	return total / n_cases
end

function compute_score( x, y, measure)
	-- Column-wise similarity of two dim-by-n tensors.
	-- measure: 'cos' (default) for cosine similarity, 'dot' for plain
	-- dot product. Returns a 1-D tensor of n scores.
	local m = measure or 'cos'
	if m == 'dot' then
		return torch.cmul(x,y):sum(1)[{1,{}}]
	elseif m == 'cos' then
		-- normalize each column to unit L2 norm, then take dot products
		local x_unit = torch.cdiv(x, torch.repeatTensor(x:norm(2,1), x:size(1), 1))
		local y_unit = torch.cdiv(y, torch.repeatTensor(y:norm(2,1), y:size(1), 1))
		return torch.cmul(x_unit, y_unit):sum(1)[{1,{}}]
	else
		error(m .. ' is invalid')
	end
end

function eval( cases , rank_function )
	-- Rank the candidates of every case with rank_function, store the
	-- resulting ordering on each case (case.cand_rank), and return the
	-- mean GAP over all cases.
	-- rank_function(case, cand_names) must return a 1-D tensor of indices
	-- into cand_names, best candidate first.
	print('ranking...')
	for iter,case in pairs(cases) do
		local name = {}
		-- progress report every 50 cases
		-- (was math.mod, deprecated since Lua 5.1; % is equivalent)
		if iter % 50 == 0 then print(iter) end

		-- flatten the candidate set (keys of cand_list.name) into an
		-- array; pairs() order is unspecified, but the returned rank
		-- indexes into this same array, so the pairing stays consistent
		for ca,_ in pairs(case.cand_list.name) do
			name[#name+1] = ca
		end

		-- was an accidental global; keep it local to this iteration
		local rank = rank_function(case, name)
		case.cand_rank = {name = {}}
		for i = 1,#name do
			case.cand_rank.name[i] = name[rank[{i}]]
		end
	end

	print('compute GAP...')
	return compute_avg_gap(cases)
end

function rank_random( case , cand_names )
	-- Baseline ranker: a uniformly random permutation of the candidates.
	-- The case itself is ignored.
	local n_cands = #cand_names
	return torch.randperm(n_cands)
end

function rank_wo_context( case, cand_names )
	-- Rank candidates by similarity of their (sigmoided) embeddings to
	-- the target word's embedding, ignoring the sentential context.
	-- Uses the globals `emb` (embedding matrix) and sigmoid().
	local cand_embs = sigmoid(emb:index(1, torch.LongTensor(cand_names)))
	local target_emb = sigmoid(emb[{{case.target},{}}])
	local score = compute_score(cand_embs:t(), 
						torch.repeatTensor(target_emb, #cand_names,1):t())
	-- sort candidates by descending score; keep sort outputs local
	-- (they were accidental globals `_` and `rank`)
	local _, rank = score:sort(1, true)
	return rank	
end

function rank_context( case, cand_names )
	-- Rank candidates using the DBM's context-sensitive representation:
	-- build a batch whose first column is (target word + context) and
	-- whose remaining columns are (candidate + same context), then score
	-- each candidate's context-free composition activations against the
	-- target's context-aware ones.
	-- Uses the globals dbm_model, sigmoid() and compute_score().

	-- data layout: row 1 = word id, rows 2..end = context word ids;
	-- column 1 = target, columns 2.. = candidates
	local data = torch.Tensor(dbm_model.nctxwords+1,1+#cand_names)
	-- NOTE(review): resize() mutates case.context in place — this relies
	-- on the context being a fresh clone per case (see load_gold); confirm
	-- if rank_context may ever run twice over the same cases
	data[{{2,-1},{}}]:copy(torch.repeatTensor(
							case.context:resize(dbm_model.nctxwords,1),
							1, 1+#cand_names))
	data[{1,1}] = case.target
	for i,name in ipairs(cand_names) do
		data[{1,i+1}] = name
	end

	-- views onto the batch
	local cws = data[{{2,-1},{}}]	-- context words
	local ws = data[{{1},{}}]		-- target / candidate words

	-- context-aware composition probabilities via mean-field inference
	local comp_probs_dbm, hid_probs_dbm = dbm_model:mean_field_updates(cws, ws)

	-- context-free composition probabilities straight from the embeddings
	local wembs = dbm_model:compute_embs(ws, dbm_model.wemb)
	local comp_probs_rbm = sigmoid(
						(dbm_model.wembcomp:t() * wembs )
						:add(torch.repeatTensor(dbm_model.compbias, 1, wembs:size(2))))
	
	-- semantic representation: context-aware for the target (column 1),
	-- context-free for the candidates (columns 2..)
	local compdim = comp_probs_dbm:size(1)
	local sem = torch.zeros(compdim, 1 + #cand_names)
	sem[{{1,compdim},{2,-1}}]				:copy(comp_probs_rbm[{{},{2,-1}}])
	sem[{{1,compdim},{1}}]					:copy(comp_probs_dbm[{{},{1}}])

	-- cosine score of every candidate column against the target column
	local score = compute_score(sem[{{},{2,-1}}], 
					torch.repeatTensor(sem[{{},{1}}]:clone(), 1, #cand_names),
					'cos')
	-- keep sort outputs local (they were accidental globals `_`, `rank`)
	local _, rank = score:sort(1, true)
	return rank
end

if #arg == 5 then

	-- command-line configuration; intentionally global — the functions
	-- above read these paths and the loaded objects directly
	dic_emb_path = arg[1]
	gold_path = arg[2]
	ctx_data_path = arg[3]
	dbm_model_path = arg[4]
	rbm_model_path = arg[5]

	-- load dic & emb
	print('load dic & emb...')
	f = torch.DiskFile(dic_emb_path, 'r')
	dic = f:readObject()
	setmetatable(dic, Dict_mt)
	emb = f:readObject()
	f:close()	-- fixed: the handle was previously leaked

	-- load models
	print('load dbm_model...')
	dbm_model = AddVeCoSemDBM:load_from_file(dbm_model_path)
	rbm_model = AddCompRBM:load_from_file(rbm_model_path)

	-- load gold cases and sanity-check the context window size
	print('load gold and context...')
	cases = load_gold()
	if dbm_model.nctxwords ~= cases[1].context:numel() then
		error('window sizes do not match')
	end

	-- eval with the context-sensitive ranker
	rank_function = rank_context
	print(eval(cases, rank_function))

else

	print('<dic_emb_path> <gold_path> <ctx_path> <dbm_model_path> <rbm_model_path>')
end
