% The training function. Inputting the number of episodes (number of runs).
% Trains two independent Q-tables over numEpisodes random episodes each:
%   Q1 ("grass")  - path following, state = (dist from path middle, dist from east edge)
%   Q2 ("litter") - litter collection, state = (columns to litter + 1, direction to litter)
% Returns them in the struct hw7Agent and also saves it to hw7Agent.mat.
function [hw7Agent] = hw7Train(numEpisodes)
%Setting the constants and variables
RMAX = 25;   % grid rows
CMAX = 25;   % grid columns
RMIN = 1;    % first valid row index
CMIN  = 1;   % first valid column index
MIN_PATH = 7;    % minimum path thickness (rows)
MAX_PATH = 25;   % maximum path thickness (rows)
distFromEnd = 0;
distFromMidPath = 0;

% learning rate
LEARN_RATE = 0.8;
% NOTE(review): discount factor is 0, so the max-future-value term in every
% Q update below is always multiplied away and only the immediate reward is
% learned — confirm this is intentional.
DISCOUNT_FACT = 0;

% Number of actions in every state. 5 - look N,NE,E,SE,S
%Let 1-N, 2-NE, 3-E, 4-SE, 5-S
NUM_ACTIONS = 5;
% Iterate over the number of episodes

% Initial placeholder struct; both fields are overwritten with the trained
% tables at the bottom of the function.
hw7Agent = struct('grass',zeros(RMAX,CMAX,NUM_ACTIONS),'litter',zeros(RMAX,CMAX,NUM_ACTIONS));
Q1=zeros(RMAX,CMAX,NUM_ACTIONS); % grass q table
Q2=zeros(RMAX,CMAX,NUM_ACTIONS); % litter q table

% ----- Phase 1: train the grass/path Q-table (Q1) -----
% Each episode paints a random horizontal band of 0-reward "path" rows onto
% a -1-reward "grass" map, drops the agent on a random cell, and random-walks
% until the agent reaches the east edge, doing one Q-learning update per step.
for episodes=1:numEpisodes
	%generate map of -1's (every cell is grass, reward -1)
	gMap = zeros(RMAX,CMAX)-1;
	% generate random number between MIN_PATH and MAX_PATH
	% NOTE(review): uint8() rounds to nearest, so the endpoints of the range
	% receive half the probability of interior values — confirm intended.
	pathSize = uint8((MAX_PATH-MIN_PATH)*rand(1)+MIN_PATH);
	%get path start position based on size (so the band fits in the grid)
	maxPathRange = RMIN+RMAX - pathSize;
	pathStart =  uint8((maxPathRange-RMIN)*rand(1)+RMIN);
    %create path in map: rows pathStart..pathStart+pathSize-1 become reward 0
    for i=pathStart:pathStart+pathSize-1
        gMap(i,:) = 0;
    end
	% setting agent position (uniform random cell)
	agentPosx = uint8((RMAX-RMIN)*rand(1)+RMIN);
	agentPosy = uint8((CMAX-CMIN)*rand(1)+CMIN);
	% update dist's based on agent position
	distFromEnd = CMAX - agentPosy;	
	% NOTE(review): uint8 subtraction saturates at 0 in MATLAB, so
	% distFromMidPath can never be negative; the "<=0" wrap-around below
	% therefore only fires when it is exactly 0 — confirm this matches the
	% intended state encoding.
	distFromMidPath = agentPosx - uint8((pathStart+pathSize/2));
    
	% random-walk until the east edge is reached. Loop invariant: while
	% distFromEnd > 0 we have agentPosy < CMAX, so eastward moves stay in range.
	while (distFromEnd > 0)

		% pick a random action 1..5 (1-N, 2-NE, 3-E, 4-SE, 5-S);
		% any action that would leave the grid is redrawn via continue
		nextDir = uint8((NUM_ACTIONS-1)*rand(1)+1);
		if(nextDir == 1) % North
			if(agentPosx == RMIN)
				continue
			end
			agentPosx = agentPosx - 1;		
        elseif(nextDir == 2) % North East
			if(agentPosx == RMIN)
				continue
            end
            agentPosx = agentPosx - 1;		
			agentPosy = agentPosy + 1;		
        elseif(nextDir == 3) % East
			if(agentPosy == CMAX)
				continue
			end
			agentPosy = agentPosy + 1;		
        elseif(nextDir == 4) % South East
			if(agentPosx == RMAX)
				continue
			end
			agentPosx = agentPosx + 1;		
			agentPosy = agentPosy + 1;		
        elseif(nextDir == 5) % South
			if(agentPosx == RMAX)
				continue
			end
			agentPosx = agentPosx + 1;		
        end
		
		%correction based on new information
		reward = gMap(agentPosx,agentPosy);
	
        % recompute the state after the move
        newdistFromEnd = CMAX - agentPosy;	
        newdistFromMidPath = agentPosx - uint8((pathStart+pathSize/2));
        % shift distances into valid 1-based table indices (1..25)
        lookupDistMidPath = distFromMidPath;
        lookupDistFromEnd = distFromEnd+1;

        lookupNewDistMidPath = newdistFromMidPath;
        lookupNewDistFromEnd = newdistFromEnd+1;

		% map a mid-path distance of 0 to row 25 of the table (see saturation
		% note above: values below 0 cannot occur)
		if(distFromMidPath<=0)
            lookupDistMidPath = distFromMidPath + 25;
        end
        if(newdistFromMidPath<=0)
            lookupNewDistMidPath = newdistFromMidPath + 25;
        end   
		% Q-learning update; with DISCOUNT_FACT = 0 the max(...) term is zero
		Q1(lookupDistMidPath, lookupDistFromEnd, nextDir)=Q1(lookupDistMidPath,lookupDistFromEnd,nextDir)*(1-LEARN_RATE)+LEARN_RATE*(reward+(DISCOUNT_FACT*max(Q1(lookupNewDistMidPath, lookupNewDistFromEnd,:))));
		%update dist's
		distFromEnd = newdistFromEnd;
		distFromMidPath = newdistFromMidPath;
	end
end

% ----- Phase 2: train the litter Q-table (Q2) -----
% Each episode scatters 10 litter cells (reward 10) on an otherwise
% zero-reward map, targets the nearest litter that is not west of the agent,
% and random-walks until the litter is reached (plus one extra update step)
% or overshot to the west, doing one Q-learning update per step on the state
% (columns-to-litter + 1, direction-to-litter).
for episodes=1:numEpisodes
	%generate map of 0's
	lMap = zeros(RMAX,CMAX);
	% setting agent position (uniform random cell)
	agentPosx = uint8((RMAX-RMIN)*rand(1)+RMIN);
	agentPosy = uint8((CMAX-CMIN)*rand(1)+CMIN);
    % Row 1 of agentLitter is the agent; rows 2..11 are the 10 litters, so
    % the first 10 pairwise distances returned by pdist below are exactly
    % the agent-to-litter distances.
    agentLitter = zeros(11,2);
    agentLitter(1,1) = agentPosx;
    agentLitter(1,2) = agentPosy;
    %generating 10 random litters (reward 10 each)
    for i=1:10
        % random litter coordinates within the grid
        litterX = uint8((RMAX-RMIN)*rand(1)+RMIN);
        litterY = uint8((CMAX-CMIN)*rand(1)+CMIN);
        lMap(litterX,litterY) = 10;
        agentLitter(i+1,1) = litterX;
        agentLitter(i+1,2) = litterY;
    end

	% calculate distances from the litters
    % (pdist requires the Statistics and Machine Learning Toolbox)
    allDistances = pdist(agentLitter)';

    distancesFromLitter = zeros(1,10); % preallocated (was grown per-iteration)
    for i=1:10
        distancesFromLitter(i) = allDistances(i);
    end
    [agentLitterDistances, litterIndices] = sort(distancesFromLitter);

    % choose the nearest litter that is not behind (west of) the agent
    i=1;
    allBehind = 1;
    while i<=10
        litterPos = agentLitter(litterIndices(i)+1,:);
        if(litterPos(2) < agentPosy)
            i = i+1;
            continue
        else
            nearestLitterX = agentLitter(litterIndices(i)+1,1);
            nearestLitterY = agentLitter(litterIndices(i)+1,2);
            i=11;
            allBehind = 0;
        end
    end
    % in case all litters are behind the agent, skip this episode
    if(allBehind == 1)
        continue;
    end

    % classify the direction from the agent to the target litter
    % (1-N, 2-NE, 3-E, 4-SE, 5-S); default is E for any combination the
    % chain below does not cover
    direction = 3;
    ySteps = nearestLitterY - int8(agentPosy);
    xSteps = nearestLitterX - int8(agentPosx);
    if (xSteps < 0 && ySteps == 0 ) % N
        direction = 1;
    elseif (xSteps < 0 && ySteps > 0 ) % NE
        direction = 2;
    elseif (xSteps == 0 && ySteps > 0 ) % E
        direction = 3;
    elseif (xSteps > 0 && ySteps > 0 ) % SE
        direction = 4;
    elseif (xSteps > 0 && ySteps ==0 ) % S
        direction = 5;
    end

    toContinue = 1;
	while (toContinue ~= 0)
		% pick a random action 1..5 and apply it; any action that would
		% step off the grid is redrawn via continue
		nextDir = uint8((NUM_ACTIONS-1)*rand(1)+1);

		if(nextDir == 1) % North
			if(agentPosx == RMIN)
				continue
			end
			agentPosx = agentPosx - 1;
        elseif(nextDir == 2) % North East
			if(agentPosx == RMIN || agentPosy==CMAX)
				continue
            end
            agentPosx = agentPosx - 1;
			agentPosy = agentPosy + 1;
        elseif(nextDir == 3) % East
			if(agentPosy == CMAX)
				continue
			end
			agentPosy = agentPosy + 1;
        elseif(nextDir == 4) % South East
			if(agentPosx == RMAX || agentPosy==CMAX)
				continue
			end
			agentPosx = agentPosx + 1;
			agentPosy = agentPosy + 1;
        elseif(nextDir == 5) % South
			if(agentPosx == RMAX)
				continue
			end
			agentPosx = agentPosx + 1;
        end

		%correction based on new information
		reward = lMap(agentPosx,agentPosy);

        newySteps = nearestLitterY - int8(agentPosy);
        newxSteps = nearestLitterX - int8(agentPosx);

        % re-classify the direction to the target from the new position.
        % BUG FIX: newdirection was previously left unassigned when the agent
        % landed exactly on the litter (newxSteps==0 && newySteps==0), which
        % made the Q update below read an undefined variable if that happened
        % on the first move. Defaulting to the current direction reproduces
        % the old stale-value behavior on later iterations (direction is
        % assigned newdirection at the bottom of the loop) while fixing the
        % first-iteration crash.
        newdirection = direction;
        if (newxSteps < 0 && newySteps == 0 ) % N
            newdirection = 1;
        elseif (newxSteps < 0 && newySteps > 0 ) % NE
            newdirection = 2;
        elseif (newxSteps == 0 && newySteps > 0 ) % E
            newdirection = 3;
        elseif (newxSteps > 0 && newySteps > 0 ) % SE
            newdirection = 4;
        elseif (newxSteps > 0 && newySteps ==0 ) % S
            newdirection = 5;
        end

        % shift column distances into 1-based table indices
        lookupY = ySteps+1;
        lookupNewY = newySteps+1;
        % litter overshot to the west: end the episode without an update
        if(lookupNewY <= 0)
            toContinue = 0;
            continue
        end
        % Q-learning update on the litter table.
        % BUG FIX: the old value and the next-state max were previously read
        % from Q1 (the grass table); they must come from Q2.
		Q2(lookupY,direction, nextDir)=Q2(lookupY,direction,nextDir)*(1-LEARN_RATE)+LEARN_RATE*(reward+(DISCOUNT_FACT*max(Q2(lookupNewY,newdirection ,:))));
   		%update dist's
        ySteps = newySteps;
        direction = newdirection;

        % when the litter is reached, run exactly one more update step
        % (toContinue goes 1 -> -1 -> 0), then stop
        if (toContinue == -1)
            toContinue = 0;
        elseif (toContinue ~= 0 && agentPosx == nearestLitterX && agentPosy == nearestLitterY)
            toContinue = -1;
        end

	end
end


% Package the two trained Q-tables into the returned agent struct and
% persist it to disk for later use by the playing/evaluation code.
hw7Agent = struct('grass', Q1, 'litter', Q2);
save('hw7Agent.mat','hw7Agent');