require 'tree'

-- number of worker processes forked for parallel cost/gradient computation
NPROCESS = 2
-- when true, the word-embedding lookup table L is treated as fixed:
-- it is excluded from fold/unfold and receives no gradient updates
FIX_L = false

--**************** RAE class ******************--
-- reAutoEncoder: recursive autoencoder over binary parse trees, with a
-- per-node softmax classifier; instances share methods via the metatable
reAutoEncoder = {}
reAutoEncoder_mt = {__index = reAutoEncoder}

--****************** needed functions ******************--
-- Draw an n x m matrix whose entries are i.i.d. uniform on [min, max].
function uniform( n, m, min, max )
	local range = max - min
	local M = torch.rand(n, m):mul(range):add(min)
	return M
end

-- Column-wise Lp normalization.
-- input:
-- 	X : n x m matrix, each column is an n-dim vector
-- 	p : which norm to use (1, 2, or any other p > 0)
-- output: X with every column divided by its own Lp norm
function normalize( X , p)
	local ones = torch.ones(X:size(1), 1)
	if p == 1 then
		local norms = X:sum(1)
		return torch.cdiv(X, torch.mm(ones, norms))
	elseif p == 2 then
		local norms = torch.sqrt(torch.pow(X, p):sum(1))
		return torch.cdiv(X, torch.mm(ones, norms))
	else
		local norms = torch.pow(torch.pow(X, p):sum(1), 1/p)
		return torch.cdiv(X, torch.mm(ones, norms))
	end
end

-- Element-wise logistic sigmoid: 1 / (1 + exp(-x)).
function logistic( X )
	local negX = torch.mul(X, -1)
	local denom = torch.exp(negX):add(1)
	return torch.cdiv(torch.ones(X:size()), denom)
end

-- Derivative of the logistic function, expressed via its output.
-- input:
-- 	logiX : logistic(X), the already-activated values
-- output: logistic(X) * (1 - logistic(X)), element-wise
function logisticPrime( logiX )
	local oneMinus = torch.mul(logiX, -1):add(1)
	return torch.cmul(oneMinus, logiX)
end

-- Hyperbolic tangent activation (range [-1,1]); thin wrapper kept so it
-- can be stored/passed as the model's `func` field.
function tanh( X )
	local out = torch.tanh(X)
	return out
end

-- Jacobian of x -> tanh(x) / ||tanh(x)||_2, evaluated from the
-- already-computed activation tanh(x).
-- input:
-- 	tanhX = tanh(X) must be a n x 1 vector 
-- output:
-- 	grad : n x n matrix
function norm2TanhPrime( tanhX )
	local dim = tanhX:size(1)
	local tanhX2 = torch.pow(tanhX,2)
	-- tanh'(x) = 1 - tanh(x)^2
	local OneMinusTanhX2 = torch.mul(tanhX2,-1):add(1)
	local nrm2 = math.sqrt(tanhX2:sum())

	-- diagonal term: tanh'(x) / ||tanh(x)||
	local P1 = torch.diag( torch.mul(OneMinusTanhX2,1/nrm2):resize(dim)  )
	-- rank-one correction from differentiating the norm in the denominator
	local P2 = torch.mm(tanhX, torch.cmul(tanhX, OneMinusTanhX2):t():mul(1/(nrm2*nrm2*nrm2)))
	return P1 - P2
end

--************************* construction ********************--
-- Create a new recursive autoencoder with the given structure.
-- input:
-- 	struct = { nCategory, Lookup, func, funcPrime }
-- 	  Lookup : dim x dicLen word-embedding matrix (columns are word vectors)
-- 	  nCategory : number of output classes for the softmax classifier
-- 	  func / funcPrime : activation function and its derivative
-- All weights are initialized uniformly in [-1, 1].
function reAutoEncoder:new( struct )
	local rae = {}
	local dim = struct.Lookup:size(1)
	local nCat = struct.nCategory

	-- encoder: parent = func(W11*c1 + W12*c2 + Wh*head + Wlcw*lctx + Wrcw*rctx + b1)
	rae.W11 = uniform(dim, dim, -1, 1)
	rae.W12 = uniform(dim, dim, -1, 1)
	rae.b1 = uniform(dim, 1, -1, 1)

	-- decoder: reconstructs both children from the parent feature
	rae.W21 = uniform(dim, dim, -1, 1)
	rae.W22 = uniform(dim, dim, -1, 1)
	rae.b21 = uniform(dim, 1, -1, 1)
	rae.b22 = uniform(dim, 1, -1, 1)

	-- softmax classifier applied to every node feature
	rae.WCat = uniform(nCat, dim, -1, 1)
	rae.bCat = uniform(nCat, 1, -1, 1)

	-- word embeddings, L2-normalized column-wise
	rae.L = normalize(struct.Lookup, 2)
	rae.func = struct.func
	rae.funcPrime = struct.funcPrime

	-- head-word and left/right sentence-context weight matrices
	rae.Wh = uniform(dim, dim, -1, 1)
	rae.Wlcw = uniform(dim, dim, -1, 1)
	rae.Wrcw = uniform(dim, dim, -1, 1)

	setmetatable(rae, reAutoEncoder_mt)
	return rae
end

--[[load network from file
function reAutoEncoder:load( filename , func, funcPrime )
	local file = torch.DiskFile.new(filename)
	local buff = file:readInt(3)
	local dim = buff[1]
	local dicLen = buff[2]
	local nCat = buff[3]

	local rae = {}
	setmetatable(rae, reAutoEncoder_mt)

	local Theta = torch.Tensor(file:readDouble(dim*dicLen + dim*dim*4 + dim*3 + dim*nCat + nCat))
	file:close()

	local old_FIXL = FIX_L
	FIX_L = false
	rae:unfold(Theta, dim, dicLen, nCat)
	FIX_L = old_FIXL
	
	rae.func = func or tanh
	rae.funcPrime = funcPrime or norm2TanhPrime
	rae.L = normalize(rae.L, 2)

	return rae
end
]]

-- Serialize the whole model to a binary file with torch's object writer.
function reAutoEncoder:save( filename )
	local f = torch.DiskFile(filename, 'w')
	f:binary()
	f:writeObject(self)
	f:close()
end

-- Deserialize a model written by reAutoEncoder:save, then reattach the
-- metatable and the activation function pair (functions do not survive
-- serialization; defaults are tanh / norm2TanhPrime).
function reAutoEncoder:load( filename , func, funcPrime )
	local f = torch.DiskFile(filename, 'r')
	f:binary()
	local rae = f:readObject()
	f:close()
	setmetatable(rae, reAutoEncoder_mt)

	rae.func = func or tanh
	rae.funcPrime = funcPrime or norm2TanhPrime
	return rae
end


-- Fold all parameters into one flat vector (order must match
-- reAutoEncoder:unfold). If Model is given, its fields override the
-- corresponding fields of self (used to fold gradient tables); missing
-- fields fall back to self. When FIX_L is set, the lookup table L is
-- excluded from the vector.
-- Fix: the vector is now allocated at its exact size; previously, with
-- FIX_L set, it carried dim*dicLen trailing zeros that nothing filled.
function reAutoEncoder:fold( Model )
	Model = Model or {}
	local Params = {
		Model.W11 or self.W11,
		Model.W12 or self.W12,
		Model.b1 or self.b1,
		Model.W21 or self.W21,
		Model.W22 or self.W22,
		Model.b21 or self.b21,
		Model.b22 or self.b22,
		Model.WCat or self.WCat,
		Model.bCat or self.bCat,
		Model.Wh or self.Wh,
		Model.Wlcw or self.Wlcw,
		Model.Wrcw or self.Wrcw,
	}
	if not FIX_L then Params[#Params+1] = Model.L or self.L end

	local dim = self.L:size(1)
	local dicLen = self.L:size(2)
	local nCat = self.bCat:size(1)

	-- exact element count: 7 dim x dim matrices, 3 dim-vectors,
	-- the classifier (WCat, bCat), plus L unless it is fixed
	local size = dim*dim*7 + dim*3 + dim*nCat + nCat
	if not FIX_L then size = size + dim*dicLen end

	local Theta = torch.zeros(size)
	local i = 1
	for _,P in ipairs(Params) do
		local nElem = P:nElement()
		Theta[{{i,i+nElem-1}}] = P
		i = i + nElem
	end

	return Theta
end

-- Unfold a flat parameter vector back into the model's tensors, in the
-- same order used by reAutoEncoder:fold.
-- dim/dicLen/nCat are only needed on the very first call, to allocate
-- the tensors. Callers whose tensors already exist (the usual case) may
-- call self:unfold(Theta) without them: the `self.X or torch.Tensor(...)`
-- fallbacks short-circuit, so torch.Tensor(nil, ...) is never evaluated.
function reAutoEncoder:unfold( Theta , dim, dicLen, nCat )
	-- L is only consumed from Theta when the lookup table is trainable
	if not FIX_L then self.L = self.L or torch.Tensor(dim, dicLen) 
	elseif self.L == nil then error('L is nil') end
	self.W11 = self.W11 or torch.Tensor(dim, dim)
	self.W12 = self.W12 or torch.Tensor(dim, dim)
	self.b1 = self.b1 or torch.Tensor(dim, 1)
	self.W21 = self.W21 or torch.Tensor(dim, dim)
	self.W22 = self.W22 or torch.Tensor(dim, dim)
	self.b21 = self.b21 or torch.Tensor(dim, 1)
	self.b22 = self.b22 or torch.Tensor(dim, 1)
	self.WCat = self.WCat or torch.Tensor(nCat, dim)
	self.bCat = self.bCat or torch.Tensor(nCat, 1)
	self.Wh = self.Wh or torch.Tensor(dim, dim)
	self.Wlcw = self.Wlcw or torch.Tensor(dim, dim)
	self.Wrcw = self.Wrcw or torch.Tensor(dim, dim)

	-- the order below must stay in sync with reAutoEncoder:fold
	local Params = {}

		Params[1] = self.W11
		Params[2] = self.W12
		Params[3] = self.b1
		Params[4] = self.W21
		Params[5] = self.W22
		Params[6] = self.b21
		Params[7] = self.b22
		Params[8] = self.WCat
		Params[9] = self.bCat
		Params[10] = self.Wh
		Params[11] = self.Wlcw
		Params[12] = self.Wrcw
		if not FIX_L then Params[13] = self.L end 

	-- copy consecutive slices of Theta into each parameter tensor in place
	local i = 1
	for _,P in ipairs(Params) do
		local nElem = P:nElement()
		P:copy(Theta[{{i,i+nElem-1}}])
		i = i + nElem
	end
end

--************************ forward **********************--
-- Forward pass over one sentence tree: computes each node's feature
-- bottom-up, reconstructs children at internal nodes, and stores the
-- reconstruction / classification errors plus every intermediate that
-- backpropagate needs directly on the nodes (the tree is annotated
-- in place).
-- NOTE(review): the reverse loop below assumes every child has a larger
-- index than its parent, so children are visited first — confirm against
-- the builder in 'tree'.
--input:
--	Tree : compact tree
--	config : {alpha, lambda}; alpha weighs reconstruction error,
--	         (1-alpha) weighs classification error
--output:
--	Tree (annotated), and the sentence's word sequence
function reAutoEncoder:forward( Tree, config )

	-- localize parameters for speed inside the node loop
	local W11 = self.W11
	local W12 = self.W12
	local W21 = self.W21
	local W22 = self.W22
	local b1 = self.b1
	local b21 = self.b21
	local b22 = self.b22
	local WCat = self.WCat
	local bCat = self.bCat
	local Wh = self.Wh
	local Wlcw = self.Wlcw
	local Wrcw = self.Wrcw
	local L = self.L
	local func = self.func
	local funcPrime = self.funcPrime

	local dim = self.L:size(1)
	local dicLen = self.L:size(2)
	local nCat = self.bCat:size(1)

	local config = config or {alpha = 1, lambda = 1e04}	-- default is unsupervised learning
	local alpha = config.alpha

	-- building tree: collect leaves in node order to recover the word
	-- sequence; each leaf remembers its [pos,pos] span in that sequence,
	-- and its label is converted to the numeric column index into L
	local nNode = #Tree
	local words = {}
	for i = 1, nNode do
		local node = Tree[i]
		if #node.childId == 0 then
			local pos = #words+1
			words[pos] = node.label
			node.pos = {pos, pos}
			node.label = tonumber(node.label)
		end
	end 

	-- reverse index order: children are processed before their parents
	for i = nNode,1,-1 do 
		local node = Tree[i]
		if node.cat == nil then
			-- 2-class one-hot target: {1,0} if meaningful, {0,1} otherwise
			if not node.meaningful then node.cat = torch.Tensor({0,1}) else node.cat = torch.Tensor({1,0}) end
			node.cat = node.cat:resize(2,1)
		end

		-- for leaves
		if #node.childId == 0 then
			node.feature = L[{{},{node.label}}] 
			node.cover = 1

			-- classify on single words (softmax = L1-normalized exp)
			node.predict = normalize( torch.mm(WCat,node.feature):add( bCat ):exp(), 1 )
			node.ecat = (-torch.cmul(node.cat, torch.log(node.predict))):sum() * (1-alpha)

		-- for internal nodes
		else
			local child1 = Tree[node.childId[1]]
			local child2 = Tree[node.childId[2]]
			
			-- span covered by this node = union of the children's spans
			local pos = {child1.pos[1], child2.pos[2]}
			local cover = pos[2] - pos[1] + 1

			local C1 = child1.feature
			local C2 = child2.feature
			local c1Cover = child1.pos[2] - child1.pos[1] + 1
			local c2Cover = child2.pos[2] - child2.pos[1] + 1
			
			-- context word embeddings; the last two columns of L appear to
			-- act as sentinel vectors when the span touches a sentence
			-- boundary — TODO confirm how L's tail columns are reserved
			local lCtx = L[{{}, {dicLen - 1}}]; if pos[1] > 1 then lCtx = L[{{}, {words[pos[1] - 1]}}] end
			local rCtx = L[{{}, {dicLen}}]; if pos[2] < #words then rCtx = L[{{}, {words[pos[2] + 1]}}] end
			local head = L[{{}, {node.hWord}}] 

			-- cal parent feature from children, head word, and context
			local unnormFeature = func( torch.mm(W11,C1):add(torch.mm(W12,C2)):
					add(torch.mm(Wh,head)):
					add(torch.mm(Wlcw,lCtx)):
					add(torch.mm(Wrcw,rCtx)):
					add(b1)  ) 
			local feature = normalize(unnormFeature, 2) 

			-- reconstruct child features from the parent
			local unRCC1 = func( torch.mm(W21, feature):add(b21) ) 
			local unRCC2 = func( torch.mm(W22, feature):add(b22) ) 
			local rcC1Feature = normalize(unRCC1, 2) 
			local rcC2Feature = normalize(unRCC2, 2) 
			local c1Diff = torch.add(rcC1Feature,-C1)
			local c2Diff = torch.add(rcC2Feature,-C2)

			-- cal reconstruction error, weighted by each child's span share
			local erec = alpha * (torch.pow(c1Diff,2):sum()*(c1Cover/cover) + torch.pow(c2Diff,2):sum()*(c2Cover/cover))

			-- compute classification error (cross-entropy against node.cat)
			local predict = normalize( torch.mm(WCat,feature):add( bCat ):exp(), 1 )
			local ecat = (-torch.cmul(node.cat, torch.log(predict))):sum() * (1-alpha)

			-- stash everything backpropagate will need
			node.lCtxFeature = lCtx
			node.rCtxFeature = rCtx
			node.headFeature = head

			node.pos = pos
			node.unnormFeature = unnormFeature
			node.feature = feature
			node.unRCC1 = unRCC1 
			node.unRCC2 = unRCC2
			node.rcC1Feature = rcC1Feature
			node.rcC2Feature = rcC2Feature
			-- diffs are pre-scaled here so backprop can use them directly
			node.c1Diff = c1Diff:mul(2*alpha*c1Cover/cover)
			node.c2Diff = c2Diff:mul(2*alpha*c2Cover/cover)
			node.erec = erec
			node.predict = predict
			node.ecat = ecat
		end
	end
	return Tree, words
end

--*********************** backpropagate *********************--
-- Backpropagation for one tree/sentence: accumulates parameter gradients
-- into `grad` (mutated in place) and returns the summed cost
-- (reconstruction + classification error over all nodes).
-- Nodes are visited top-down (index 1 first, assumed to be the root);
-- each parent leaves a "message" for its children in `support`.
-- input:
-- 	tree, words : annotated tree and word sequence from :forward
-- 	config : {alpha, ...}
-- 	grad : table of gradient tensors, same shapes as the parameters
-- output: cost for this tree (not yet normalized by sample count)
function reAutoEncoder:backpropagate( tree, words, config , grad )

	local dim = self.L:size(1)
	local dicLen = self.L:size(2)
	local nCat = self.bCat:size(1)

	-- stack W21 / W22 / WCat so a single matrix product carries all three
	-- decoder+classifier contributions back through a node's feature
	local GW2 = torch.Tensor(2*dim + nCat,dim)
	GW2[{{1,dim},{}}] = self.W21
	GW2[{{dim+1,2*dim},{}}] = self.W22
	GW2[{{2*dim+1,2*dim+nCat},{}}] = self.WCat

	local gradL = grad.L
	local gradW11 = grad.W11
	local gradW12 = grad.W12
	local gradb1 = grad.b1
	local gradW21 = grad.W21
	local gradW22 = grad.W22
	local gradb21 = grad.b21
	local gradb22 = grad.b22
	local gradWCat = grad.WCat
	local gradbCat = grad.bCat
	local gradWh = grad.Wh
	local gradWlcw = grad.Wlcw
	local gradWrcw = grad.Wrcw
	local cost = 0

	local W11 = self.W11
	local W12 = self.W12
	local W21 = self.W21
	local W22 = self.W22
	local b1 = self.b1
	local b21 = self.b21
	local b22 = self.b22
	local WCat = self.WCat
	local bCat = self.bCat
	local Wh = self.Wh
	local Wlcw = self.Wlcw
	local Wrcw = self.Wrcw
	local W2 = GW2

	local L = self.L
	local func = self.func
	local funcPrime = self.funcPrime

	-- (re-declarations; they shadow the identical locals above — harmless)
	local dim = L:size(1)
	local dicLen = L:size(2)
	local nCat = self.bCat:size(1)

	local alpha = config.alpha

	-- support[i] is the message node i receives from its parent:
	-- 	diff   : (pre-scaled) reconstruction residual for this child
	-- 	W      : encoder weight connecting this child to the parent
	-- 	gradZp : parent's pre-activation gradient
	-- The root (node 1) gets all-zero messages.
	local support = {}
	support[1] = {
		diff = torch.zeros(dim,1),
		W = torch.zeros(dim,dim),
		gradZp = torch.zeros(dim,1) }

	local nNode = #tree

	for i = 1,nNode do
		local node = tree[i]
		local diff = support[i].diff
		local mDiff = -diff
		local W = support[i].W
		local gradZp = support[i].gradZp

		-- for internal node
		if #node.childId > 0 then
			-- compute cost
			cost = cost + node.erec + node.ecat
			
			-- compute gradZ: pre-activation gradients of the two
			-- reconstruction branches and the classifier
			local gradZ21 = torch.mm(node.c1Diff:t(), funcPrime(node.unRCC1)):t()
			local gradZ22 = torch.mm(node.c2Diff:t(), funcPrime(node.unRCC2)):t()
			local gradZCat = torch.add(node.predict,-node.cat):mul(1-alpha )
 
			-- stack them to match the W2 = [W21;W22;WCat] layout above
			local gradZ2 = torch.Tensor(2*dim+nCat, 1)
			gradZ2[{{1,dim},{}}] = gradZ21
			gradZ2[{{dim+1,2*dim},{}}] = gradZ22
			gradZ2[{{2*dim+1,2*dim+nCat},{}}] = gradZCat
			-- total gradient at this node's pre-activation: local terms
			-- plus what flowed down from the parent (gradZp, -diff)
			local gradZ = torch.mm(funcPrime(node.unnormFeature):t(), torch.mm(W2:t(), gradZ2 )
					:add( torch.mm(W:t(), gradZp) ):add(mDiff) )

			-- compute gradient for decoder and classifier weights
			gradW21:add(torch.mm(gradZ21, node.feature:t()))
			gradb21:add(gradZ21)
			gradW22:add(torch.mm(gradZ22, node.feature:t()))
			gradb22:add(gradZ22)
			gradWCat:add(torch.mm(gradZCat, node.feature:t()))
			gradbCat:add(gradZCat)

			local child1 = tree[node.childId[1]]
			local child2 = tree[node.childId[2]]

			-- encoder weights: outer products of gradZ with each input
			gradW11:add(torch.mm(gradZ, child1.feature:t())) 
			gradW12:add(torch.mm(gradZ, child2.feature:t()))
			gradb1:add(gradZ)

			gradWh:add(torch.mm(gradZ, node.headFeature:t()))
			gradWlcw:add(torch.mm(gradZ, node.lCtxFeature:t()))
			gradWrcw:add(torch.mm(gradZ, node.rCtxFeature:t()))

			if not FIX_L then 
				-- embedding gradients for head and context words; out-of-
				-- sentence context falls back to the sentinel columns
				-- dicLen-1 / dicLen (mirrors the lookup in :forward)
				gradL[{{},{node.hWord}}]:add( torch.mm(Wh:t(),gradZ) ) 
				local p = words[node.pos[1] - 1]; if p == nil then p = dicLen - 1 end
				gradL[{{},{p}}]:add( torch.mm(Wlcw:t(),gradZ) )
				p = words[node.pos[2] + 1]; if p == nil then p = dicLen end
				gradL[{{},{p}}]:add( torch.mm(Wrcw:t(),gradZ) )
			end

			-- propagate to its children
			support[node.childId[1]] = {diff = node.c1Diff, W = W11, gradZp = gradZ}
			support[node.childId[2]] = {diff = node.c2Diff, W = W12, gradZp = gradZ}

		else -- leaf
			-- compute cost
			cost = cost + node.ecat

			-- compute gradZ: parent message plus classifier gradient
			local gradZCat = torch.add(node.predict,-node.cat):mul( 1-alpha )
			local gradZ = torch.mm(W:t(), gradZp):add(mDiff):add( torch.mm( WCat:t(), gradZCat  )  )

			-- compute gradient
			gradWCat:add(torch.mm(gradZCat, node.feature:t()))
			gradbCat:add(gradZCat)
			if not FIX_L then gradL[{{},{node.label}}]:add( gradZ ) end
		end
	end
	
	return cost
end

--************************ compute cost and gradient *****************--
--input:
--output:
require 'parallel'

-- worker function: body executed in each forked child process.
-- Receives {Forest, rae, config, forParsing} from the parent, runs the
-- forward pass (and backprop unless parsing) over its sub-forest, then
-- sends back {cost, folded gradient, Trees}. In parsing mode each tree
-- is replaced by a compact per-sentence summary carrying confusion-matrix
-- counts (tPos/tNeg/fPos/fNeg).
function worker()

	require 'RAE'
	local data = parallel.parent:receive()

	local Forest = data.Forest
	local rae = data.rae
	local nSample = #Forest
	local config = data.config
	local forParsing = data.forParsing

	-- zeroed gradient accumulators, one per parameter tensor
	local grad = {
		W11 = torch.zeros(rae.W11:size()),
		W12 = torch.zeros(rae.W12:size()),
		b1 = torch.zeros(rae.b1:size()),
		W21 = torch.zeros(rae.W21:size()),
		W22 = torch.zeros(rae.W22:size()),
		b21 = torch.zeros(rae.b21:size()),
		b22 = torch.zeros(rae.b22:size()),
		WCat = torch.zeros(rae.WCat:size()),
		bCat = torch.zeros(rae.bCat:size()),
		Wh = torch.zeros(rae.Wh:size()),
		Wlcw = torch.zeros(rae.Wlcw:size()),
		Wrcw = torch.zeros(rae.Wrcw:size())	
	}
	if not FIX_L then grad.L = torch.zeros(rae.L:size()) end

	-- explicit function-style calls because rae lost its metatable in IPC
	local cost = 0
	local timer = torch.Timer()
	for i = 1, nSample do
		local Tree = Forest[i]
		local tree, words = reAutoEncoder.forward(rae, Tree, config)
		if not forParsing then
			cost = cost + reAutoEncoder.backpropagate(rae, tree, words, config , grad)
		end
	end
	print('time for child running ' .. timer:time().real) io.flush()

	if not forParsing then 	Forest = nil 
	else 
		-- parsing mode: collapse each tree into its root summary with
		-- confusion counts (class 1 = positive/meaningful, 2 = negative)
		for i = 1,nSample do
			local t = Forest[i]
			local newt = {}
	
			local tNeg = 0
			local tPos = 0
			local fNeg = 0
			local fPos = 0

			for _,node in ipairs(t) do
				
				-- argmax of prediction vs. argmax of gold category
				local _,pdClass = node.predict:max(1)
				pdClass = pdClass[{1,1}] 
				local _,class = node.cat:max(1)
				class = class[{1,1}]
				local label = node.wordId or 'X'
				if node.meaningful then label = label .. 'm' end

				-- only internal nodes count towards the confusion matrix
				if #node.childId > 0 then
					if class == 1 and pdClass == 1 then tPos = tPos + 1  label = label .. '.tp' end
					if class == 1 and pdClass == 2 then fNeg = fNeg + 1  label = label .. '.fn' end
					if class == 2 and pdClass == 1 then fPos = fPos + 1  label = label .. '.fp' end
					if class == 2 and pdClass == 2 then tNeg = tNeg + 1  label = label .. '.tn' end
				end
				
				
				if #node.childId > 0 then newt[#newt+1] = node.predict[{1,1}] end-- { label = label, childId = node.childId , score = node.predict , meaningful = node.meaningful}
				--if #node.childId == 0 then newt[#newt].wordId = node.wordId end
			end
			newt[1].tPos = tPos
			newt[1].tNeg = tNeg
			newt[1].fPos = fPos
			newt[1].fNeg = fNeg
			Forest[i] = newt[1]
		end
	end
	parallel.parent:send( { cost = cost, grad = reAutoEncoder.fold(rae, grad) , Trees = Forest } )
end
	
-- parent call: forks NPROCESS workers, shards the forest evenly among
-- them, collects their costs/gradients (and parse summaries), then
-- normalizes by sample count and adds L2 regularization.
-- Results are written back into `param` (totalCost, totalGrad, Trees).
function parent(param)

	local Forest = param.Forest
	local nSample = #Forest
	local rae = param.rae
	local forParsing = param.forParsing

	-- split data into NPROCESS contiguous shards of `size` trees
	local size = math.ceil(nSample / NPROCESS)
	local children = parallel.sfork(NPROCESS)
	children:exec(worker)

	-- send data (each child gets its shard plus a copy of the model)
	local timer = torch.Timer()
	for i = 1, NPROCESS do
		local data = {Forest = {}, rae = param.rae, config = param.config, forParsing = forParsing}
		for j = 1,size do
			local id = (i-1)*size+j
			if id > nSample then break end
			data.Forest[j] = Forest[id]
		end
		children[i]:send(data) 
	end
	print('time for parent -> children ' .. timer:time().real) io.flush()

	-- receive results and accumulate cost / gradient across shards
	timer = torch.Timer()
	for i = 1, NPROCESS do
		local reply = children[i]:receive()
		param.totalCost = param.totalCost + reply.cost
		if param.totalGrad == nil then
			param.totalGrad = reply.grad
		else
			param.totalGrad:add(reply.grad)
		end

		if forParsing then
			if param.Trees == nil then param.Trees = {} end
			for j = 1,#reply.Trees do
				param.Trees[#param.Trees+1] = reply.Trees[j]
			end
		end
	end
	print('time for children -> parent ' .. timer:time().real) io.flush()

	timer = torch.Timer()
	children:sync()
	print('time for sync ' .. timer:time().real) io.flush()

	-- finalize: average over samples and add lambda/2 * ||Theta||^2
	-- (regularization spans the full folded parameter vector)
	local M = param.rae:fold()
	param.totalCost = param.totalCost * (1/nSample) + param.config.lambda/2 * torch.pow(M,2):sum()
	param.totalGrad:mul(1/nSample):add(torch.mul(M,param.config.lambda))
end

-- Compute total cost and the folded gradient over Forest using NPROCESS
-- worker processes (see parent/worker above).
-- input:
-- 	Forest : list of trees
-- 	config : {alpha, lambda}
-- 	forParsing : when true, skip backprop and return parse summaries
-- output:
-- 	totalCost, totalGrad (flat vector), Trees (only in parsing mode)
function reAutoEncoder:computeCostAndGrad( Forest, config , forParsing )

	local param = {
		rae = self,
		config = config,
		Forest = Forest,
		totalCost = 0,
		totalGrad = nil,
		forParsing = forParsing or false
	}


	-- NOTE(review): on worker failure the error is only printed and
	-- parallel is shut down, yet the (partial/zero) results below are
	-- still returned — callers see cost 0 / grad nil in that case
	local ok,err = pcall(parent, param)
	if not ok then 	print(err) parallel.close() end
	
	return param.totalCost, param.totalGrad, param.Trees


--[[ for single process
	local grad = {
		L = torch.zeros(self.L:size()),
		W11 = torch.zeros(self.W11:size()),
		W12 = torch.zeros(self.W12:size()),
		b1 = torch.zeros(self.b1:size()),
		W21 = torch.zeros(self.W21:size()),
		W22 = torch.zeros(self.W22:size()),
		b21 = torch.zeros(self.b21:size()),
		b22 = torch.zeros(self.b22:size()),
		WCat = torch.zeros(self.WCat:size()),
		bCat = torch.zeros(self.bCat:size()),
		Wh = torch.zeros(self.Wh:size()),
		Wlcw = torch.zeros(self.Wlcw:size()),
		Wrcw = torch.zeros(self.Wrcw:size())
	}
	if not FIX_L then grad.L = torch.zeros(self.L:size()) end

	local cost = 0
	local Trees = {}
	local nSample = #Forest
	for i = 1,nSample  do
		local Tree = Forest[i] 
		local tree, words = self:forward(Tree, config)
		cost = cost + self:backpropagate(tree, words, config , grad)
		Trees[i] = tree
	end

	return cost/nSample, self:fold(grad):mul(1/nSample), Trees
]]
end

-- Numerically verify the analytic gradient via central finite
-- differences: perturb each folded parameter by +/- eps, recompute the
-- cost, and compare (costPlus - costMinus)/(2*eps) to the analytic
-- component. Returns true iff every difference stays below tolerance.
function reAutoEncoder:checkGradient(Forest, config)
	local eps = 1e-4
	local tolerance = 1e-8

	local dim = self.L:size(1)
	local dicLen = self.L:size(2)
	local nCat = self.bCat:size(1)

	local Theta = self:fold()
	local _, gradTheta = self:computeCostAndGrad(Forest, config)

	-- number of trainable entries (L is skipped when fixed)
	local nParams = dim*dicLen + 7*dim*dim + 3*dim + dim*nCat + nCat
	if FIX_L then nParams = nParams - dim*dicLen end

	local ok = true
	for i = 1, nParams do
		local idx = {{i}}

		Theta[idx]:add(eps)
		self:unfold(Theta)
		local costPlus = self:computeCostAndGrad(Forest, config)

		Theta[idx]:add(-2*eps)
		self:unfold(Theta)
		local costMinus = self:computeCostAndGrad(Forest, config)

		-- restore the original parameter value
		Theta[idx]:add(eps)
		self:unfold(Theta)

		local diff = math.abs((costPlus - costMinus) / (2*eps) - gradTheta[i])
		print('diff ' .. diff)

		if diff > tolerance then
			ok = false
			--break
		end
	end

	return ok
end

--***************************** parse **************************
-- Run the forward pass only (alpha = 1 puts full weight on the
-- reconstruction term; lambda = 0 disables regularization) and return
-- the per-sentence prediction summaries produced by the workers.
function reAutoEncoder:parse( Forest )
	local parseConfig = {alpha = 1, lambda = 0}
	local _, _, Trees = self:computeCostAndGrad(Forest, parseConfig, true) -- forParsing = true
	return Trees
end

-- Parse the forest, sum the confusion-matrix counts carried by each
-- sentence's summary, print the evaluation metrics, and return the
-- overall accuracy.
function reAutoEncoder:eval( Forest )
	local Trees = self:parse(Forest)

	local tPos, tNeg, fPos, fNeg = 0, 0, 0, 0
	for _, t in ipairs(Trees) do
		tNeg = tNeg + t[1].tNeg
		tPos = tPos + t[1].tPos
		fNeg = fNeg + t[1].fNeg
		fPos = fPos + t[1].fPos
	end

	local total = tPos + fPos + tNeg + fNeg
	print('precision ' .. tPos / (tPos + fPos))
	print('recall ' .. tPos / (tPos + fNeg))
	print('true neg rate ' .. tNeg / (tNeg + fPos))
	print('accuracy ' .. (tPos + tNeg) / total)
	print('tpos rate ' .. (tPos) / total)
	print('tneg rate ' .. (tNeg) / total)
	print('fpos rate ' .. (fPos) / total)
	print('fneg rate ' .. (fNeg) / total)
	io.flush()

	return (tPos + tNeg) / total
end

--******************************* train networks *************************
---- optFunc from 'optim' package (signature: optFunc(opfunc, x, config, state))
-- Minibatch training loop. Evaluates on testForest every 10 iterations
-- and saves a checkpoint ('model.<k>'); optFuncState is passed to the
-- optimizer as both its config and its state table.
function reAutoEncoder:train( trainForest, testForest, batchSize, optFunc, optFuncState, config)
	local nSample = #trainForest
	local j = 0		-- current minibatch index (wraps around)

	local iter = 1
	local timer = torch.Timer()
	
	print('accuracy = ' .. self:eval(testForest)) io.flush()

	-- closure handed to the optimizer: given a flat parameter vector M,
	-- returns (cost, gradient) on the next minibatch
	local function func( M )
		print('time for optim ' .. timer:time().real) io.flush()
		self:unfold(M)

		-- extract data: advance to the next minibatch, wrapping to 1
		local timer1 = torch.Timer()
		j = j + 1
		if j > nSample/batchSize then j = 1 end
		local subForest = {}
		for k = 1,batchSize do
			subForest[k] = trainForest[k+(j-1)*batchSize]
		end
		print('time to extract data ' .. timer1:time().real) io.flush()

		timer1 = torch.Timer()
		local cost, Grad = self:computeCostAndGrad(subForest, config)
		print('time to compute cost & grad ' .. timer1:time().real) io.flush()

		-- for visualization (mod 1 is always 0: prints every iteration)
		if math.mod(iter,1) == 0 then
			print('--- iter: ' .. iter)
			print('cost: ' .. cost)
			io.flush()
		end
		-- periodic evaluation + checkpoint
		if math.mod(iter,10) == 0 then
			print('accuracy = ' .. self:eval(testForest))
			self:save('model.' .. math.floor(iter / 10))
			io.flush()
		end

		iter = iter + 1
		collectgarbage()
		
		timer = torch.Timer()
		return cost, Grad
	end

	local M = optFunc(func, self:fold(), optFuncState, optFuncState)
	self:unfold(M)
end


--*********************************** main ******************************--
function test ()
	local rae = reAutoEncoder:load( 'rae.txt' )
	local t1 = tree:createFromString('(X (X (X (X (X 1) (X 4)) (X 6)) (X (X 3) (X 5))) (X (X 2) (X 3)))')
	local t2 = tree:createFromString('(X (X 1) (X (X 2) (X 3)))')

	t1:binarize(true)
	t1:removeTags()
	t2:binarize(true)
	t2:removeTags()

	print(t1:toString())

	local config = {lambda = 1e-4, alpha = 0.2}
	rae:checkGradient({t1},config)
end

--test()
