function data_structures(full_obs)
% DATA_STRUCTURES  Declare and initialize the global state shared across the
% simulation: per-player memory, Sarsa(lambda) learning parameters, the
% tile-coding feature dimensions and the match statistics counters.
% Depending on system_mode it also loads a starting policy ('load' mode) or
% opens the policy-comparison log file ('follow' mode).
%
% Input:
%   full_obs - not referenced in this function; kept so existing callers
%              keep working.

% declare variables %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global log_id;              % file id for the log file
global compare_policies;    % boolean value indicating whether multiple policies should be compared
global policy_names;        % the names of the policies being compared
global policies;            % an array of the policies being compared

global record_states;       % boolean value indicating whether the states should be recorded in state_set
global state_set;           % an array containing the recorded states

% user configurable variables
global num_players_r;       % number of players on the red team
global num_players_b;       % number of players on the blue team
global ball_position;       % initial position of the ball

global opponent;            % opponent parameters
global fixed_strategy;      % fixed strategy for team blue
global learning_algorithm;  % learning algorithm in case the system is in learning mode
global policy;              % policy to be followed if the system is in follow mode
global policy_file;         % .m file containing the policy to be followed

% NOTE(review): this global shadows MATLAB's built-in load() inside every
% function that declares it -- consider renaming project-wide.
global load;                % flag indicating whether a starting policy should be loaded
global starting_policy;     % the starting policy on which the learning agents will build
global starting_point;      % number of learning episodes completed while obtaining the starting policy

global system_mode;         % the running mode of the system
global home_path;           % the path leading to the root directory of the project

% structures for player memory (learning/red team)
global actions;
global running;
global timers;
global targets;             % the last target of the GetOpen action for each player
global state_values;        % value of the state when the last macro action was selected, per player

% structures for opponent memory (blue team)
global actions_op;
global running_op;
global timers_op;
global targets_op;          % the last target of the GetOpen action for each opponent
global state_values_op;     % value of the state when the last macro action was selected, per opponent

global ball_timer;          % timer for how long the ball has stayed in one place (used to prevent deadlocks)

% Sarsa related
global reward;      % immediate reward from state
global alpha;       % step-size constant
global delta;       % change
global epsilon;     % constant for epsilon-greedy selection
global gamma;       % discount rate
global lambda;      % eligibility trace value
global theta;       % parameters vector
global e;           % eligibility trace
global Fa;          % feature vector
global Qa;          % Q-value of current action

global num_tilings;         % number of tilings overlapped for each state variable
global parameter_dim;       % length of vector theta

% feature vector variables
global x_tiles;             % number of tiles for x position of object
global y_tiles;             % number of tiles for y position of object
global x_tile_width;        % width of each x-tile
global y_tile_width;        % width of each y-tile
global angle_tiles;         % number of tiles for angle generalization
global angle_tile_width;    % width of each tile
global distance_tiles;      % number of tiles for distance generalization
global distance_tile_width; % width of each tile

% stats
global red;
global blue;
global time;
global num_passes;
global random;              % counter of how many times each player explored
global possession;          % time that the learning team is in possession
global total_reward;        % the total reward accumulated by the whole team throughout the learning

% initialize variables %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% per-player memory: one slot per player on each team
actions = zeros(1,num_players_r);
running = zeros(1,num_players_r);
timers = zeros(1,num_players_r);
targets = cell(1,num_players_r);
state_values = zeros(1, num_players_r);

actions_op = zeros(1,num_players_b);
running_op = zeros(1,num_players_b);
timers_op = zeros(1,num_players_b);
targets_op = cell(1,num_players_b);
state_values_op = zeros(1, num_players_b);

% state representation and features
%num_tilings = 32;

x_tiles = 8;        % 8 tiles
x_tile_width = 15;  % 8*15 = 120 (the width of the pitch)

y_tiles = 4;
y_tile_width = 15;

distance_tiles = 14;        % distance varies between 0 and 135; 14*10 = 140
distance_tile_width = 10;

angle_tiles = 8;            % 8*45 = 360
angle_tile_width = 45;

% total length of the tile-coded parameter vector: one x group, one y group,
% plus a distance group and an angle group for every player on both teams
parameter_dim = (num_tilings*(x_tiles+1)-1) ...
    + (num_tilings*(y_tiles+1)-1) ...
    + (num_players_r+num_players_b)*(num_tilings*(distance_tiles+1)-1) ...
    + (num_players_r+num_players_b)*(num_tilings*(angle_tiles+1)-1);

% sarsa related
theta = zeros(num_players_r, parameter_dim); % separate parameter vector for each possible action

% if we are saving states that occur
state_set = {};

% stats
red = 0;
blue = 0;
time = 0;
num_passes = 0;
random = 0;
possession = 0;
total_reward = 0;

% if we are loading a policy to start learning from
if strcmp(system_mode, 'load')

    file = loadfile(home_path, 'policies/', starting_policy);
    % was disp(sprintf(...)); fprintf is the idiomatic single call
    fprintf('Loading file %s ...\n', starting_policy);
    theta = file.parameters;
    if (starting_point ~= 0)
        % resume the accumulated statistics from where the saved policy left off
        epsilon  = file.epsilon;
        red = file.ratio*starting_point;
        blue = starting_point - red;
        possession = file.possession_per_episode*starting_point;
        random = file.random_per_episode*(num_players_r*starting_point);
        total_reward = file.reward_per_episode*(num_players_r*starting_point);
        time = file.time;
    end

elseif strcmp(system_mode, 'follow')

    if(compare_policies)
        % open log file; fail loudly rather than letting later fprintf calls
        % error out on an invalid file id
        log_id = fopen(strcat(home_path,'log.txt'), 'w+');
        if log_id == -1
            error('data_structures:logOpen', ...
                'Could not open %slog.txt for writing', home_path);
        end
        % use explicit '%s' formats so dynamic text (timestamps, policy and
        % opponent names) containing '%' cannot be misread as fprintf
        % conversion specifiers
        fprintf(log_id, 'Start Time:%s\n', datestr(now));
        first_line = 'Policies |';
        for policy_num = 1:numel(policy_names)
           first_line = strcat(first_line, sprintf(' %s |', policy_names{policy_num}));
           file = loadfile(home_path,'policies/',policy_names{policy_num});
           policies{policy_num} = file.parameters;
        end
        fprintf(log_id, 'Playing against %s using policy %s \n', opponent.name, policy_file);
        fprintf(log_id, '%s\n', first_line);
        fprintf(log_id, '------------------------------------------------\n');
    end
end

%theta
end
