// Iterative weight-update loop (exponentiated / multiplicative-weights style):
// starting from a random L1-normalized weight vector w, each iteration builds a
// correction dw from Da and genf, takes a multiplicative step, and re-projects
// onto the L1 unit ball.
// NOTE(review): depends on names defined elsewhere in the file or workspace:
// ws, gsize, epls, eefc(), evalr(), genf() — their semantics are not visible
// in this chunk; comments on them below are assumptions to be confirmed.

//gen random weights
//get optimal policy

//get new optimal policy , add to set of policies
//--run for n times--
w=rand(ws,1);                 // random initial weight vector, ws x 1
//w=[1000;1;1000];
//w=[0;1;0];
w=w/norm(w,1);                // L1-normalize: entries sum (in abs value) to 1
lmd=0.9;                      // step-size constant for the update below
//ws,ns have size
//pls=cell();
//pls(1).entries=qlearn(w);

for ns=1:20                   // fixed number of update iterations; step shrinks as lmd/ns
    //compute Da
//    z1s=zeros((gsize+2),3*gsize,3*gsize);
//    //z1sd=zeros((gsize+2),3*gsize,3*gsize);
//    z1a=zeros((gsize+2),3*gsize,3*gsize,3);
//    //z1ad=zeros((gsize+2),3*gsize,3*gsize,3);
//    z2s=zeros((gsize+2),3*gsize,3*gsize);
//    z2a=zeros((gsize+2),3*gsize,3*gsize,3);
    Da=eefc(w);               // NOTE(review): indexed as Da(i,t,j) below, so a
                              // gsize x 3*gsize x 3 array — presumably some
                              // occupancy/expectation under w; confirm in eefc
    //compute gradient
    fs=evalr(epls);           // fs' is subtracted from dw below, so fs must be
                              // conformable with w — TODO confirm evalr's output
    dw=0;
    for i=1:gsize
        for t=1:3*gsize
            //for sa=1:3*gsize
                for j=1:3     // j-2 in {-1,0,1}: offset passed to genf
                    dw=dw+Da(i,t,j)*genf(i-1+j-2,t,j-2);//rw(i-1,t-1,sa-1,j-2,w);
                end
            //end
        end
    end
    //update weights
    dw=dw/sum(Da)*(3*gsize-1);   // scale accumulated sum by total mass of Da
    //dw=(fs')/size(epls,1)-dw; //TODO change
    dw=fs'-dw;                   // correction = fs' minus the weighted genf sum
    w=w.*exp(lmd*dw/ns);         // multiplicative (exponentiated-gradient) step,
                                 // decaying as 1/ns
    //w=w-lmd*dw/ns;             // (additive-step alternative, disabled)
    w=w/norm(w,1);               // re-project onto the L1 unit ball
    //printf("%d\n",dw);
end
//pls(ns+1).entries=qlearn(w);
