//Generate random weights
//Get the optimal policy
//
//Get a new optimal policy, add it to the set of policies
//--run the above for n times--
//Draw a random initial weight vector of length ws (ws is defined
//elsewhere in this file) and normalize it to unit 1-norm.
w=rand(ws,1);
//w=[1000;1;1000];
//w=[0;1;0];
w=w/norm(w,1);
//lmd scales the multiplicative weight update at the bottom of the loop;
//dt is the column-chunk width used when filling Da from eefc().
lmd=0.9;
dt=1;
//z2sg is global so it is shared with functions called below
//(presumably eefc/genf/evalr2); it is re-initialized per generation.
global z2sg
//Outer loop: 20 update steps on the weight vector w; the step size
//decays as 1/ns.
for ns=1:20
//ws,ns have size
//pls=cell();
//pls(1).entries=qlearn(w);
    //dws accumulates the per-generation update dw; averaged after the
    //kn loop.
    dws=zeros(ws,1);
    for kn=0:ngen
        //Per-generation data pulled from containers defined elsewhere
        //in the file: policy entries, grid size, grid, and actions.
        epls=gepls(kn+1).entries;
        gsize=ggsize(kn+1);
        grid=ggrid(kn+1).entries;//NOTE(review): grid and pact are assigned but
        pact=gpact(kn+1).entries;//never read below -- presumably consumed via globals by the helpers; confirm.
    //compute Da
//    z1s=zeros((gsize+2),3*gsize,3*gsize);
//    //z1sd=zeros((gsize+2),3*gsize,3*gsize);
//    z1a=zeros((gsize+2),3*gsize,3*gsize,3);
//    //z1ad=zeros((gsize+2),3*gsize,3*gsize,3);
//    z2s=zeros((gsize+2),3*gsize,3*gsize);
//    z2a=zeros((gsize+2),3*gsize,3*gsize,3);
        //Reset the shared table and mark the start position; H is
        //defined elsewhere (looks like a horizon length -- confirm).
        z2sg=zeros((gsize),3*gsize+H);
        z2sg(1,1)=1;//start pos
        //Da: gsize x 3*gsize x 3 hypermatrix, filled dt columns at a
        //time from eefc(w,k) in the loop below.
        Da=zeros(gsize,3*gsize,3);
    //fs=evalr(epls);
        for k=0:dt:3*gsize-1
            db=eefc(w,k);
            //Copy the first dt columns of eefc's result into the
            //matching column slice of Da (k is 0-based, Da is 1-based).
            Da(:,k+1:k+dt,:)=db(:,1:dt,:);
    //Da=eefc(w);
    //compute gradient
        end
        fs=evalr2(epls);
        //Triple sum over states i, steps t, and the 3 values of j:
        //each Da entry is weighted by genf(i-1+j-2,t,j-2) (a feature
        //value, presumably -- genf is defined elsewhere).
        dw=0;
        for i=1:gsize
            for t=1:3*gsize
            //for sa=1:3*gsize
                for j=1:3
                    dw=dw+Da(i,t,j)*genf(i-1+j-2,t,j-2);//rw(i-1,t-1,sa-1,j-2,w);
                end
            //end
            end
        end
    //update weights
        //Normalize by the total mass of Da, then rescale by 3*gsize-1
        //(the number of column steps covered above).
        dw=dw/sum(Da)*(3*gsize-1);
    //dw=(fs')/size(epls,1)-dw; //TODO change
        //Update direction: evalr2 result minus the Da-weighted sum.
        dw=fs'-dw;
        dws=dws+dw;
        
    //printf("%d\n",dw);
    end
    //Average the accumulated update over the ngen+1 generations.
    dws=dws/(ngen+1);
    //dws(1)=dws(1)*3;
    //dws(3)=dws(3)*H*1.5;
    //Multiplicative (exponentiated-gradient-style) update with a 1/ns
    //decaying step, followed by renormalization to unit 1-norm.
    w=w.*exp(lmd*dws/ns);
    //w=w-lmd*dw/ns;
    w=w/norm(w,1);
end
//pls(ns+1).entries=qlearn(w);

