H=5;//planning horizon: current step + H lookahead steps — TODO confirm against later use
ws=3;//window size? used further down the file — TODO confirm
gm=.9;//presumably the discount factor gamma for the RL value iteration — TODO confirm
// Open the generated-environment file; err is nonzero on failure (unchecked here).
[fdg, err] = mopen("gen0.dat");
//[fdp, err] = mopen("pls0.dat");
//recover optimal policy, epls is a 2-dimensional grid
gsize=strtod(mgetl(fdg,1));// first line of gen0.dat: number of grid states
grid=mgetl(fdg);// remaining lines: the grid contents (one string per line)
//expert=strtod(mgetl(fdp));
//epls=zeros(size(expert,1)-1,1);
//for i=2:size(expert,1)
//    epls(i-1)=expert(i)-expert(i-1);
//end
//use part to search character
//define feature vectors for a state
//gen random weights
//find optimal strategy for weights ->write RL code add to policy list
//maximize weights for diff V
// BUGFIX: descriptors returned by mopen must be closed with mclose;
// file("close", ...) only manages units opened by file() and fails on mopen units.
mclose(fdg);
// pact(s, a, o): transition model over gsize states, 3 actions, 3 outcomes.
// For every state, action a produces outcome o=a with prob .8 and each of the
// two other outcomes with prob .1, so each (s,a) row sums to 1.
// BUGFIX: the original fan-out assigned pact(:,1,2) twice and never set
// pact(:,3,2), leaving action-3 rows summing to .9 instead of 1.
pact=zeros(gsize,3,3);
for a=1:3
    for o=1:3
        if a==o then
            p=.8;// intended outcome
        else
            p=.1;// slip to one of the other outcomes
        end
        pact(:,a,o)=p*ones(gsize,1);
    end
end
// Boundary patches at state 1: outcome 1 is impossible there (presumably
// "move left" off the grid — TODO confirm); its mass is folded into outcome 2
// so each row still sums to 1 ([0,.9,.1] and [0,.2,.8]).
pact(1,2,1)=0;
pact(1,2,2)=.9;
pact(1,3,1)=0;
pact(1,3,2)=.2;
// Mirror patches at state gsize: outcome 3 impossible; rows become
// [.8,.2,0] and [.1,.9,0], again summing to 1.
pact(gsize,1,3)=0;
pact(gsize,2,3)=0;
pact(gsize,1,2)=.2;
pact(gsize,2,2)=.9;
// Build epls via genpls2 (defined elsewhere in the project — not visible here).
// NOTE(review): the column triple [1000;1;1000] and the trailing 1 look like
// generator parameters (weights/counts?) — confirm against genpls2's definition.
epls=genpls2([1000;1;1000],1);
//file("close", fdp);
