% Fitted Q-learning driver for a Tetris agent, seeded from human demonstrations.
clear
addpath('../qLearning','../../function_approximator/neural_net','../../tetrisState')
%% initialize the problem
% Globals shared with the qLearning / neural_net helper functions on the path.
global paramDiscountFactor;
global paramDeathPenalty;
global paramIncrementalLoss;
global paramStateSize;
global paramActionSize;
global paramStateRows;
global paramStateCols;
global legalMoves;

paramDiscountFactor = 0.75;     % gamma for Q-value backups
paramDeathPenalty = -100.0;     % reward on game-over transitions
paramIncrementalLoss = -0.01;   % small per-step penalty (encourages survival)

% Board is 21 rows x 10 cols; the +1 presumably appends the current-piece
% index to the flattened board vector -- TODO confirm against tetrisState.
paramStateSize = 21*10 +1;
paramActionSize = 1;
paramStateRows = 21;
paramStateCols = 10;
% Worst case: incremental loss on every state component plus a death penalty.
minPenalty = paramStateSize*paramIncrementalLoss + paramDeathPenalty;
maxReward = 0;
% Bounds on attainable Q-values, used to scale the network's output range.
[minPossibleQ, maxPossibleQ] = getMinMaxQ(paramDiscountFactor,minPenalty,maxReward);
%extractFeatures()
% Load recorded human play as (s, a, s', r) tuples for supervised seeding.
fname = 'human_data1.mat';
[ Dstates, Dactions, DnextStates, Drewards,legalMoves ] = humanTrajectoryToLearningFormat(fname);
reward = getStateReward(Dstates,paramIncrementalLoss,paramDeathPenalty); % adding rewards to states

%% initialize the neural network
% Build the Q-function approximator sized to the state/action data, with
% outputs bounded by the attainable Q-value range computed above.
net = initializeNNIPOP(Dstates,Dactions,minPossibleQ,maxPossibleQ);

%% learn neural network
% Seed the network by fitting Q-values on the human-demonstration transitions.
net = runInnerLearningLoop(net,Dstates,Dactions,DnextStates);

%% run fitted Q
% Alternate between collecting experience with the current net and refitting
% the net on the accumulated dataset (fitted Q-iteration).
gameState = [];                 % empty -> collectExperience starts a fresh game
numberofTransitions = 500;      % transitions gathered per episode
viewData = 1;                   % render/inspect flag passed to collectExperience
for episode= 1:1000
% Execute game -- collectExperience appends new (s, a, s') transitions to the
% growing dataset and returns the (possibly in-progress) game state.
% BUG FIX: the third output was previously captured as 'DnextState' (singular),
% so the refit below kept training on the stale next-state set; it must update
% 'DnextStates' to keep the (Dstates, Dactions, DnextStates) triples aligned.
[Dstates,Dactions,DnextStates,gameState] = collectExperience(net,gameState,Dstates,Dactions,DnextStates,numberofTransitions,viewData);
% Refit the Q-network on the augmented dataset.
net = runInnerLearningLoop(net,Dstates,Dactions,DnextStates);
save all   % checkpoint the whole workspace to all.mat every episode
end