
% Top-level driver: analyses the conditional-branch execution trace of a
% fixed list of benchmarks (classification, correlation analyses) and
% reports the total CPU time consumed by the whole run.
function newstuff
    t = cputime; % start of run (CPU seconds; cputime is not wall-clock time)

    % Prints how long the run has been going, in CPU seconds since t.
    function elapsed_time()
        fprintf('running for: %.2fs\n', cputime-t );
    end

    % Runs every enabled analysis over one benchmark's trace file.
    % test : benchmark identifier used to build the CSV path (e.g. 'INT04').
    function work( test )
        % loads the trace file before using it (multiple times)
        % trace file contains the exact execution trace of the simulation
        trace = strcat('results/results.conditional_trace.',test,'.csv');
        assert( exist(trace,'file') ~=0 ); % fail fast if the trace is missing
        
        fprintf('\n[ %s ]\n',trace);
        
        fprintf('-------- (opening file) --------\n');
        
        % tab-separated trace with one header row; file.textdata holds the
        % column names, file.data the numeric trace
        file = importdata(trace, '\t', 1);
        data = file.data; % NOTE(review): only used by the commented-out classifyWrongs below
        
        %{
        fprintf('-------- MISPREDICTION RATES --------\n');
        results( file );
        
        fprintf('-------- CLUSTERS --------\n');
        clusters( file , 'gshare_base' );
        %clusters( file , 'gshare_base_cheat' );
        %clusters( file , 'gshare+' );
        %clusters( file , 'gshare++' );
        %clusters( file , 'local_limited' );
        %clusters( file , 'ISL_TAGE-cond' );
       %}
        
        % printer is a function handle (misses, printme) used by the
        % correlation analyses to classify subsets of the trace
        printer = classify( file );
        
        %{
        fprintf('-------- WRONGS --------\n');
        fprintf('-------- gshare_base --------\n');
        classifyWrongs('gshare_base');
        fprintf('-------- local_limited --------\n');
        classifyWrongs('local_limited');
        fprintf('-------- ISL_TAGE-cond --------\n');
        classifyWrongs('ISL_TAGE-cond');
        
        return;
        
        function classifyWrongs( predictor )
            PRED_COLUMN = getIndex(file.textdata, predictor );
            REAL_COLUMN = getIndex(file.textdata,'real');
            assert( PRED_COLUMN ~= -1 );
            assert( REAL_COLUMN ~= -1 );
            printer( data( data(:,PRED_COLUMN) ~= data(:,REAL_COLUMN) , : ) , true );
        end
        %}
    
        % correlation() yields the set IDs and the per-row set mapping of a
        % predictor's '_cor' column; the *_ID/*_data pairs feed the analyses
        fprintf('-------- CORRELATIONS LOCALs --------\n');
        [LL_ID LL_data] = correlation( file , 'local_limited');
        [LU_ID LU_data] = correlation( file , 'local_unlimited');
        [LN_ID LN_data] = correlation( file , 'local_nocol');
        correlationLOCAL( file , LL_ID , LL_data , LU_ID , LU_data , LN_ID , LN_data , printer);
        
        fprintf('-------- CORRELATIONS GSHAREs --------\n');
        [GB_ID GB_data] = correlation( file , 'gshare_base');
        [GU_ID GU_data] = correlation( file , 'gshare_unlimited');
        [GN_ID GN_data] = correlation( file , 'gshare_nocol');
        [GP_ID GP_data] = correlation( file , 'gshare_path');
        correlationGSHARE( file , GB_ID, GB_data, GU_ID, GU_data, GN_ID, GN_data, GP_ID, GP_data, printer);
        
        
    end

    %work('est');

    % benchmark suites to analyse
    work('INT04');
    work('CLIENT05');
    work('WS04');
    work('SERVER03');
    work('MM04');

    elapsed_time();
end

% Prints the misprediction rate of every predictor column in the trace.
% For each predictor column (columns 6..end of the header), prints:
%   name \t miss% \t #missed \t #total
% Columns whose name ends in '_cor' hold correlation-set IDs, not
% predictions, and are skipped.
function results( file )
    data = file.data;
    
    REAL_COLUMN = getIndex(file.textdata,'real');
    assert( REAL_COLUMN ~= -1 ); % consistent with the other analyses
    cor_str = '_cor';
    cor = length(cor_str);
    % 'total' instead of the original 'max', which shadowed the builtin
    total = size(data,1);
    
    % predictor columns start at 6; earlier columns hold trace metadata
    for i=6:length(file.textdata)
        str = char( file.textdata(i) );
        % strfind replaces the deprecated findstr; compare the LAST
        % occurrence so names containing '_cor' in the middle are handled
        pos = strfind( str , cor_str );
        
        if ~isempty(pos) && pos(end) == (length(str)-cor+1)
            % ignores correlation columns (name ends with '_cor')
            continue;
        end
        
        % rows where this predictor disagrees with the real outcome;
        % sum of the logical mask replaces size(data(mask),1)
        missed = sum( data(:,i) ~= data(:,REAL_COLUMN) );
        fprintf('%s\t%.2f\t%d\t%d\n',str,missed/total*100,missed,total);
    end
end


% Extracts the correlation classes of one predictor.
% Looks up the predictor's '<predictor>_cor' column in the trace and
% returns:
%   COR_ID   - the distinct set IDs found in that column
%   COR_data - the per-row set ID (one entry per prediction)
% Also prints "#sets / #predictions [avg predictions per set]".
function [COR_ID COR_data] = correlation( file , predictor )
    corName = strcat(predictor,'_cor');
    corIdx = getIndex(file.textdata, corName);
    assert( corIdx ~= -1 );

    trace = file.data;
    COR_data = trace(:,corIdx);
    COR_ID = unique( COR_data );

    nSets = size(COR_ID,1);
    nPreds = size(trace,1);
    fprintf('%s\t %4d / %4d [%.3f]\n', ...
        predictor, nSets, nPreds, nPreds/nSets);
end

% Analyses runs ("clusters") of consecutive mispredictions made by
% 'predictor' against the 'real' outcome column. Prints a histogram of
% cluster lengths and estimates the best/worst case of a hypothetical
% predictor that inverts its prediction after filter_point consecutive
% misses.
function clusters( file , predictor )

    REAL_COLUMN = getIndex(file.textdata,'real');
    PRED_COLUMN = getIndex(file.textdata, predictor );
    
    assert( REAL_COLUMN ~= -1 );
    assert( PRED_COLUMN ~= -1 );
    
    trace = file.data;
    
    % 1 where prediction and outcome differ (assumes both columns are 0/1)
    wrong = xor( trace(:,REAL_COLUMN) , trace(:,PRED_COLUMN) );
    
    c=0;                 % length of the current run of mispredictions
    chunks = 0;          % total number of misprediction runs closed
    accum = zeros(11,1); % run-length histogram; bucket 11 holds lengths > 10
    avg = 0;             % sum of lengths of runs >= filter_point
    total = 0;           % number of runs >= filter_point
    filter_point = 2;
    
    % NOTE(review): a run still open at the end of the trace is never
    % flushed into the histogram — confirm this is intentional.
    for i=1:size(wrong,1)
        if( wrong(i,1) == 1 )
            % found possible wrong chunk, start counting
            c = c + 1;
        else
            % now the predictor is correct ( reset c )
            if( c > 0 )
                % if it was bigger than 0, then count this as a new chunk
                chunks = chunks+1;
                
                %%%% hypothetical predictor %%%
                
                if( c >= filter_point ) % invert prediction after more than 1 missed
                    if( c > 10 )
                        accum(11,1) = accum(11,1) + 1;
                    else
                        accum(c,1) = accum(c,1) + 1;
                    end
                    avg = avg + c;
                    total = total+1;
                else
                    accum(c,1) = accum(c,1) + 1;
                end
            end
            c = 0;
        end
    end
    
    % --- Output Print --- %
    
    % NOTE(review): if total == 0 the ratios below print NaN — confirm
    % traces always contain at least one run >= filter_point.
    fprintf('--- clusters of %s ---\n',predictor);
    for i=1:size(accum,1)
        
        if( i > 10 )
            fprintf('10+\t%d\t%.2f\n', accum(i,1),accum(i,1)/total);
        else
            % conditional probability of missing i+1 given i were missed
            fprintf('%d\t%d\t%.2f\tP(%2d|%2d)\t%.2f\n', i,accum(i,1),accum(i,1)/total,...
                (i+1),i,...
                sum(accum(i+1:end,:))/sum(accum(i:end,:)) );
            %{ P( missing n+1 | missed n ) ... }%
        end
    end
    fprintf('average(>=%d):\t%.2f\n',filter_point,avg/total);
    
    fprintf('--- predictors ( after %d wrongs )---\n',filter_point );
    
    predictions = size( trace, 1 );
    wrongs = size(trace( trace(:,REAL_COLUMN) ~= trace(:,PRED_COLUMN), : ),1);
    
    baseline = wrongs;
    % if switches back at the exactly correct time
    optimal = wrongs-avg+(total*filter_point);
    % if switches back after missing one (thus, add total as missed)
    worst = wrongs-avg+total+(total*filter_point);
    
    fprintf('BASE : %3.2f \t ( %5d / %d ) \n', baseline/predictions*100 , baseline, predictions );
    fprintf('BEST : %3.2f \t [ %5d | %3.2f ] \n', optimal/predictions*100 , (optimal-baseline), (optimal-baseline)/predictions*100  );
    fprintf('WORST: %3.2f \t [ %5d | %3.2f ] \n', worst/predictions*100 , (worst-baseline),  (worst-baseline)/predictions*100 );

end


% Compares the three LOCAL predictor variants (limited / unlimited /
% no-collision) over the correlation sets of the finest split (LN).
% For every LN set it picks the variant with the fewest misses, then
% builds a 3x3 'matrix' counting, per coarser set, which variant's
% split was best. Ends with a bursts analysis of the LL split.
% Arguments: *_ID are the distinct set IDs, *_data the per-row set map,
% as returned by correlation(); printer is the classify() handle.
function correlationLOCAL( file , LL_ID , LL_data , LU_ID , LU_data , LN_ID , LN_data , printer )
    data = file.data;

    % correlation LOCAL %
    
    fprintf(' ---- CORRELATION LOCAL ---- \n');
    
    % warning: these indexes are used to access 'matrix'
    LOCAL_LIMT = 1;
    LOCAL_UNLM = 2;
    LOCAL_NOCO = 3;
    
    LOCAL_LIMT_COLUMN = getIndex(file.textdata,'local_limited');
    LOCAL_UNLM_COLUMN = getIndex(file.textdata,'local_unlimited');
    LOCAL_NOCO_COLUMN = getIndex(file.textdata,'local_nocol');
    REAL_COLUMN = getIndex(file.textdata,'real');
    
    assert( LOCAL_LIMT_COLUMN ~= -1 );
    assert( LOCAL_UNLM_COLUMN ~= -1 );
    assert( LOCAL_NOCO_COLUMN ~= -1 );
    assert( REAL_COLUMN ~= -1 );
    
    % only analysis at most MAX predictions
    % bests(r) = the variant (1..3) that is best for row r's LN set
    bests = zeros( size(data,1) , 1 );

    data_limt = data(:,LOCAL_LIMT_COLUMN);
    data_unlm = data(:,LOCAL_UNLM_COLUMN);
    data_noco = data(:,LOCAL_NOCO_COLUMN);
    data_real = data(:,REAL_COLUMN);
    
    % check size order
    % LN must be the finest split, LL the coarsest
    assert( size(LN_ID,1) >= size(LU_ID,1) && ...
        size(LU_ID,1) >= size(LL_ID,1) );
    
    accum_count = 0; % misses of the per-set best variant
    accum_limt = 0;  % total misses of each variant over all sets
    accum_unlm = 0;
    accum_noco = 0;
    
    accum = 0; % rows covered so far (sanity check)
    
    % for each small set
    for i=1:size(LN_ID,1)
        % indexes for the members of this set
        indexes = find( LN_data == LN_ID(i,1) );
        accum = accum + size(indexes,1);
        
        filtered_real = data_real(indexes);
        
        miss_limt = sum( data_limt(indexes) ~= filtered_real );
        miss_unlm = sum( data_unlm(indexes) ~= filtered_real );
        miss_noco = sum( data_noco(indexes) ~= filtered_real );
        
        accum_limt = accum_limt + miss_limt;
        accum_unlm = accum_unlm + miss_unlm;
        accum_noco = accum_noco + miss_noco;
        
        % which one is the best?
        % ties are broken in order LIMT > UNLM > NOCO
        if( miss_limt <= miss_unlm && miss_limt <= miss_noco )
            bests( indexes ) = LOCAL_LIMT ;
            %assert( size( unique(LL_data( indexes )), 1 ) == 1 );
            accum_count = accum_count + miss_limt;
            continue;
        end
        
        if( miss_unlm < miss_limt && miss_unlm <= miss_noco )
            bests( indexes ) = LOCAL_UNLM ;
            %assert( size( unique(LU_data( indexes )), 1 ) == 1 );
            accum_count = accum_count + miss_unlm;
            continue;
        end
        
        if( miss_noco < miss_limt && miss_noco < miss_unlm )
            bests( indexes ) = LOCAL_NOCO ;
            %assert( size( unique(LN_data( indexes )), 1 ) == 1 );
            accum_count = accum_count + miss_noco;
            continue;
        end
        
        % all cases should have been covered... very redundantly
        assert( false );
    end
    
    
    % do all predictions have a class?
    assert( accum == size(data,1) );
    %assert( size( bests( bests == 0 , : ) , 1 ) == 0 );
    
    % misses
    % convert absolute miss counts into percentages of all predictions
    pred_total = size(data,1);
    accum_count = accum_count/pred_total*100;
    accum_limt = accum_limt/pred_total*100;
    accum_unlm = accum_unlm/pred_total*100;
    accum_noco = accum_noco/pred_total*100;
    
    fprintf('best: %.2f\n', accum_count );
    fprintf('base: %.2f (%.2f)\n', accum_limt, accum_limt-accum_count );
    fprintf('unlm: %.2f (%.2f)\n', accum_unlm, accum_unlm-accum_count );
    fprintf('noco: %.2f (%.2f)\n', accum_noco, accum_noco-accum_count );
    
    % 'bests' contains level where each single PREDICTION is best
    % matrix(a,b): rows classified where the set level and the best level
    % are a and b; only the upper triangle (a <= b) is populated below
    matrix = zeros( 3 , 3 );
    
    matrix(LOCAL_UNLM,LOCAL_LIMT) = 0;
    
    matrix(LOCAL_NOCO,LOCAL_LIMT) = 0;
    matrix(LOCAL_NOCO,LOCAL_UNLM) = 0;

    accum = 0; % rows classified into 'matrix'
    
    tmp_accum = 0;
    % is LOCAL_LIMT best level ?
    % a set is attributed to a level when all its rows agree on 'bests'
    for i=1:size( LL_ID, 1)
        indexes = find( LL_data == LL_ID(i,1) );
        tmp_accum = tmp_accum + size(indexes,1);
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of LOCAL_LIMT
            % best 'prediction set'
            if( LOCAL_LIMT <= lvl(1,1) )
                matrix(LOCAL_LIMT,lvl(1,1)) = matrix(LOCAL_LIMT,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),LOCAL_LIMT) = matrix(lvl(1,1),LOCAL_LIMT)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
            accum = accum + size(indexes,1);
        end 
    end
    assert( tmp_accum == size(data,1) );
    
    tmp_accum = 0;
    % is LOCAL_UNLM best level ?
    for i=1:size( LU_ID, 1)
        indexes = find( LU_data == LU_ID(i,1) );
        tmp_accum = tmp_accum + size(indexes,1);
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of LOCAL_UNLM
            % best 'prediction set'
            if( LOCAL_UNLM <= lvl(1,1) )
                matrix(LOCAL_UNLM,lvl(1,1)) = matrix(LOCAL_UNLM,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),LOCAL_UNLM) = matrix(lvl(1,1),LOCAL_UNLM)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
            accum = accum + size(indexes,1);
        end
    end
    assert( tmp_accum == size(data,1) );
    
    tmp_accum = 0;
    % is LOCAL_NOCO best level ?
    for i=1:size( LN_ID, 1)
        indexes = find( LN_data == LN_ID(i,1) );
        tmp_accum = tmp_accum + size(indexes,1);
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of LOCAL_NOCO
            % best 'prediction set'
            if( LOCAL_NOCO <= lvl(1,1) )
                matrix(LOCAL_NOCO,lvl(1,1)) = matrix(LOCAL_NOCO,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),LOCAL_NOCO) = matrix(lvl(1,1),LOCAL_NOCO)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
            accum = accum + size(indexes,1);
        end
        
        %{
        if sum(bests(indexes) == -1,1) ~= size(indexes,1)
            disp( ' bug ' ); % overlapping elements!
            disp( size(indexes,1) );
            disp( ' ---- ' );
            disp( size(lvl,1) );
            disp( ' === ' );
            disp( lvl );
        end
        
        assert ( sum(bests(indexes) == -1,1) == size(indexes,1) );
        %}
    end
    assert( tmp_accum == size(data,1) );
    
    % ignore potentially overlapping elements
    % FIXME: this is a bug in the design of the analysis!
    % the correlation split is LOCAL is not disjoint! WTF?
    accum = size(data,1)-accum;
    
    disp( matrix );
    disp( matrix/(size(data,1)-accum)*100 );
    
    burstsAnalysis( LL_ID , LL_data , data, data_limt, data_real , printer);
        
end

% Compares the four GSHARE predictor variants (base / unlimited /
% no-collision / path) over the correlation sets of the finest split
% (GP, the path split). For every GP set it picks the variant with the
% fewest misses, then builds a 4x4 'matrix' counting, per coarser set,
% which variant's split was best. Ends with a bursts analysis of the
% GB split. Mirrors correlationLOCAL (but without the coverage checks).
% Arguments: *_ID are the distinct set IDs, *_data the per-row set map,
% as returned by correlation(); printer is the classify() handle.
function correlationGSHARE( file , GB_ID , GB_data , GU_ID , GU_data , GN_ID , GN_data , GP_ID , GP_data , printer )
    data = file.data;

    fprintf(' ---- CORRELATION GSHARE ---- \n');
    
    % warning: these indexes are used to access 'matrix'
    GSHARE_BASE = 1;
    GSHARE_UNLM = 2;
    GSHARE_NOCO = 3;
    GSHARE_PATH = 4;
    
    GSHARE_BASE_COLUMN = getIndex(file.textdata,'gshare_base');
    GSHARE_UNLM_COLUMN = getIndex(file.textdata,'gshare_unlimited');
    GSHARE_NOCO_COLUMN = getIndex(file.textdata,'gshare_nocol');
    GSHARE_PATH_COLUMN = getIndex(file.textdata,'gshare_path');
    REAL_COLUMN = getIndex(file.textdata,'real');
    
    assert( GSHARE_BASE_COLUMN ~= -1 );
    assert( GSHARE_UNLM_COLUMN ~= -1 );
    assert( GSHARE_NOCO_COLUMN ~= -1 );
    assert( GSHARE_PATH_COLUMN ~= -1 );
    assert( REAL_COLUMN ~= -1 );
    
    % only analysis at most MAX predictions
    % bests(r) = the variant (1..4) that is best for row r's GP set
    bests = zeros( size(data,1) , 1 );

    data_base = data(:,GSHARE_BASE_COLUMN);
    data_unlm = data(:,GSHARE_UNLM_COLUMN);
    data_noco = data(:,GSHARE_NOCO_COLUMN);
    data_path = data(:,GSHARE_PATH_COLUMN);
    data_real = data(:,REAL_COLUMN);
    
    base_sets = GB_ID; % lower number , larger each
    unlm_sets = GU_ID;
    noco_sets = GN_ID;
    path_sets = GP_ID; % larger number , smaller each
    
    accum_count = 0; % misses of the per-set best variant
    accum_base = 0;  % total misses of each variant over all sets
    accum_unlm = 0;
    accum_noco = 0;
    accum_path = 0;
    
    % for each small set
    for i=1:size(path_sets,1)
        % indexes for the members of this set
        indexes = find( GP_data == path_sets(i,1) );
        
        filtered_real = data_real(indexes);
        
        miss_base = sum( data_base(indexes) ~= filtered_real );
        miss_unlm = sum( data_unlm(indexes) ~= filtered_real );
        miss_noco = sum( data_noco(indexes) ~= filtered_real );
        miss_path = sum( data_path(indexes) ~= filtered_real );
        
        accum_base = accum_base + miss_base;
        accum_unlm = accum_unlm + miss_unlm;
        accum_noco = accum_noco + miss_noco;
        accum_path = accum_path + miss_path;
        
        % which one is the best?
        % ties are broken in order BASE > UNLM > NOCO > PATH
        if( miss_base <= miss_unlm && miss_base <= miss_noco && miss_base <= miss_path )
            bests( indexes ) = GSHARE_BASE;
            accum_count = accum_count + miss_base;
            continue;
        end
        
        if( miss_unlm < miss_base && miss_unlm <= miss_noco && miss_unlm <= miss_path )
            bests( indexes ) = GSHARE_UNLM;
            accum_count = accum_count + miss_unlm;
            continue;
        end
        
        if( miss_noco < miss_base && miss_noco < miss_unlm && miss_noco <= miss_path )
            bests( indexes ) = GSHARE_NOCO;
            accum_count = accum_count + miss_noco;
            continue;
        end
        
        if( miss_path < miss_base && miss_path < miss_unlm && miss_path <= miss_noco )
            bests( indexes ) = GSHARE_PATH;
            accum_count = accum_count + miss_path;
            continue;
        end
        
        % all cases should have been covered... very redundantly
        assert( false );
    end
    
    % do all predictions have a class?
    %assert( size( bests( bests == 0 , : ) , 1 ) == 0 );

    
    % misses
    % convert absolute miss counts into percentages of all predictions
    pred_total = size(data,1);
    accum_count = accum_count/pred_total*100;
    accum_base = accum_base/pred_total*100;
    accum_noco = accum_noco/pred_total*100;
    accum_unlm = accum_unlm/pred_total*100;
    accum_path = accum_path/pred_total*100;
    
    fprintf('best: %.2f\n', accum_count );
    fprintf('base: %.2f (%.2f)\n', accum_base, accum_base-accum_count );
    fprintf('unlm: %.2f (%.2f)\n', accum_unlm, accum_unlm-accum_count );
    fprintf('noco: %.2f (%.2f)\n', accum_noco, accum_noco-accum_count );
    fprintf('path: %.2f (%.2f)\n', accum_path, accum_path-accum_count );
    
    % 'bests' contains level where each single PREDICTION is best
    % matrix(a,b): rows classified where the set level and the best level
    % are a and b; only the upper triangle (a <= b) is populated below
    matrix = zeros( 4 , 4 );
    
    matrix(GSHARE_UNLM,GSHARE_BASE) = 0;
    matrix(GSHARE_NOCO,GSHARE_BASE) = 0;
    matrix(GSHARE_PATH,GSHARE_BASE) = 0;
    
    matrix(GSHARE_NOCO,GSHARE_UNLM) = 0;
    matrix(GSHARE_PATH,GSHARE_UNLM) = 0;
    
    matrix(GSHARE_PATH,GSHARE_NOCO) = 0; % TODO wrong!!

    % is GSHARE_BASE best level ?
    % a set is attributed to a level when all its rows agree on 'bests'
    for i=1:size( base_sets, 1)
        indexes = find( GB_data == base_sets(i,1) );
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of GSHARE_BASE
            % best 'prediction set'
            if( GSHARE_BASE <= lvl(1,1) )
                matrix(GSHARE_BASE,lvl(1,1)) = matrix(GSHARE_BASE,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),GSHARE_BASE) = matrix(lvl(1,1),GSHARE_BASE)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
        end 
    end

    % is GSHARE_UNLM best level ?
    for i=1:size( unlm_sets, 1)
        indexes = find( GU_data == unlm_sets(i,1) );
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of GSHARE_UNLM
            % best 'prediction set'
            if(GSHARE_UNLM <= lvl(1,1))
                matrix(GSHARE_UNLM,lvl(1,1)) = matrix(GSHARE_UNLM,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),GSHARE_UNLM) = matrix(lvl(1,1),GSHARE_UNLM)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
        end
    end
    
    % is GSHARE_NOCO best level ?
    for i=1:size( noco_sets, 1)
        indexes = find( GN_data == noco_sets(i,1) );
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of GSHARE_NOCO
            % best 'prediction set'
            if(GSHARE_NOCO <= lvl(1,1))
                matrix(GSHARE_NOCO,lvl(1,1)) = matrix(GSHARE_NOCO,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),GSHARE_NOCO) = matrix(lvl(1,1),GSHARE_NOCO)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
        end
    end
    
    % is GSHARE_PATH best level ?
    for i=1:size( path_sets, 1)
        indexes = find( GP_data == path_sets(i,1) );
        lvl = unique( bests(indexes) );
        if( size(lvl,1) == 1 && lvl(1,1) ~= -1 )
            % best 'prediction stream' is that of GSHARE_PATH
            % best 'prediction set'
            if(GSHARE_PATH <= lvl(1,1))
                matrix(GSHARE_PATH,lvl(1,1)) = matrix(GSHARE_PATH,lvl(1,1))+size(indexes,1);
            else
                matrix(lvl(1,1),GSHARE_PATH) = matrix(lvl(1,1),GSHARE_PATH)+size(indexes,1);
            end
            % mark as classified
            bests(indexes) = -1;
        end
    end
    
    disp( matrix );
    disp( matrix/size(bests,1)*100 );
    
    %disp( sum(bests) );
    %disp( size(bests,1) );
    %assert( sum(bests) == size(bests,1)*-1 );
   
    burstsAnalysis( base_sets , GB_data , data, data_base, data_real , printer);
end

% Characterizes every correlation set of a split and prints histograms:
% set sizes, misprediction-rate buckets (20% each), noise buckets,
% burst flips, warm-up times, and number of classification classes.
% Arguments:
%   base_sets - distinct set IDs of the split
%   map       - per-row set ID (row r belongs to set map(r))
%   data      - full numeric trace (rows are passed to printer)
%   data_pred - the predictor column used for the misprediction rate
%   data_real - the real-outcome column
%   printer   - classify() handle returning [nclasses interference]
function burstsAnalysis( base_sets , map , data, data_pred, data_real, printer)

    % only 11 set sizes
    % bucket 11 collects sizes > 10
    set_sizes = zeros( 11, 1 );
    
    % warmups
    % bucket 1 = never warmed up; bucket 12 = warm-up > 10
    set_warmup = zeros( 12 , 1 );
    
    % flips
    % bucket 12 collects flip counts > 10
    set_flips = zeros( 12 , 1 );
    
    % mispredictions
    % 6 buckets of 20% each (0%..100%)
    set_misp = zeros( 6 , 1 );
    
    % classes (second column for conflicts)
    set_classes = zeros( 10 , 2 );
    
    % noise
    % 6 buckets of 20% each
    set_noise = zeros( 6 , 1 );
    
    for i=1:size( base_sets, 1)
        indexes = find( map == base_sets(i,1) );
        
        % classify this set's rows without printing (printme = false)
        [nclasses intf] = printer( data(indexes,:) , false);
        set_classes( nclasses , 1 ) = set_classes( nclasses , 1 ) + 1;
        if( intf > 0 )
            set_classes( nclasses , 2 ) = set_classes( nclasses , 2 ) + 1;
        end
        
        % set size classification
        s = size(indexes,1);
        if( s > 10 )
            set_sizes( 11 , 1 ) = set_sizes( 11 , 1 ) + 1;
        else
            set_sizes( s , 1 ) = set_sizes( s , 1 ) + 1;
        end
        
        % mispredictions
        % rate rounded into one of six 20%-wide buckets
        misp = sum( data_pred(indexes) ~= data_real(indexes) )/size(indexes,1);
        misp = round(misp*5)+1;
        set_misp( misp , 1 ) = set_misp( misp, 1 ) + 1;
        
        % -----
        %base_misses(i,1) = base_sets(i,1);
        %base_misses(i,2) = size( indexes, 1 );
        %base_misses(i,3) = sum( data_pred(indexes) ~= data_real(indexes) );
        
        [ warmup burst_number flips noise taken nottaken] = setAnalysis( data_real(indexes) );
        
        if( noise == -1 )
            noise = 1; % which means 0% of noise
        else
            noise = round(noise/s*5)+1;
        end
        set_noise( noise, 1 ) = set_noise( noise, 1 ) + 1 ;
        
        if( flips > 10 )
            set_flips( 12 , 1 ) = set_flips( 12 , 1 ) + 1;
        else
            set_flips( flips+1 , 1 ) = set_flips( flips+1 , 1 ) + 1;
        end
        
        if( warmup == -1 )
            % never warmed up
            set_warmup( 1 , 1 ) = set_warmup( 1 , 1 ) + 1;
        else
            if( warmup > 10 )
                set_warmup( 12 , 1 ) = set_warmup( 12 , 1 ) + 1;
            else
                set_warmup( warmup+1 , 1 ) = set_warmup( warmup+1 , 1 ) + 1;
            end
        end
    end
    
    % pretty print
    % each line: bucket label, count, fraction of all sets
    
    fprintf('--- set sizes: ---\n');
    for i=1:size(set_sizes,1)
        if( i == size(set_sizes,1) )
            fprintf('10+\t%d\t%.2f\n',set_sizes(i,1),set_sizes(i,1)/size( base_sets, 1) );
        else
            fprintf('%d\t%d\t%.2f\n',i,set_sizes(i,1),set_sizes(i,1)/size( base_sets, 1) );
        end
    end
    
    fprintf('--- mispredictions: ---\n');
    for i=1:size(set_misp,1)
        fprintf('%d%%\t%d\t%.2f\n',(i-1)*20,set_misp(i,1),set_misp(i,1)/size(base_sets,1) );
    end
    
    fprintf('--- noise: ---\n');
    for i=1:size(set_noise,1)
        fprintf('%d%%\t%d\t%.2f\n',(i-1)*20,set_noise(i,1),set_noise(i,1)/size( base_sets, 1) );
    end
    
    fprintf('--- flips: ---\n');
    for i=1:size(set_flips,1)
        if( i == size(set_flips,1) )
            fprintf('10+\t%d\t%.2f\n',set_flips(i,1),set_flips(i,1)/size(base_sets,1) );
        else
            fprintf('%d\t%d\t%.2f\n',(i-1),set_flips(i,1),set_flips(i,1)/size(base_sets,1) );
        end
    end
    
    fprintf('--- warmup: ---\n');
    for i=1:size(set_warmup,1)
        if( i == 1 )
            fprintf('never\t%d\t%.2f\n',set_warmup(i,1),set_warmup(i,1)/size( base_sets, 1) );
            continue;
        end
        
        if( i == size(set_warmup,1) )
            fprintf('10+\t%d\t%.2f\n',set_warmup(i,1),set_warmup(i,1)/size( base_sets, 1) );
        else
            fprintf('%d\t%d\t%.2f\n',i-1,set_warmup(i,1),set_warmup(i,1)/size( base_sets, 1) );
        end
    end
    
    fprintf('--- numb. classes : ---\n');
    for i=1:size(set_classes,1)
        fprintf('%d\t%d\t%d\t%.2f\n',i,set_classes(i,1),set_classes(i,2),set_classes(i,1)/size( base_sets, 1) );
    end
    
    % this was just to print samples...
    %{
    sorted = base_misses;
    sorted = sorted( sorted(:,2) > 1 , : ); % only for those sets larger that 1
    sorted = sortrows(sorted,-2); % sort by size, descending 
    
    filter_size = max( ceil(size(base_misses)/10) , 10 );
    % these are the biggest we are considering
    sorted = sorted(1:min(size(sorted,1),filter_size), : );
    sorted = sortrows(sorted,3); % sort by misses, ascending
    
    fprintf('BIG SETS: %d\n',size(sorted,1));
    
    bests = sorted(1:3,:);
    
    middle = round(size(sorted,1)/2);
    middle = max(1,middle-1);
    middle = sorted(middle:min(middle+3,size(sorted,1)),:);
    worsts = sorted(size(sorted,1)-2:end,:);
    
    fprintf('--- showing(bests): %d ---\n',size(bests,1));
    setPrinter(bests, map, data, data_real, printer);
    fprintf('===== END bests ====== \n');
    fprintf('--- showing(middle): %d ---\n',size(middle,1));
    setPrinter(middle, map, data, data_real, printer);
    fprintf('===== END middle ====== \n');
    fprintf('--- showing(worsts): %d ---\n',size(worsts,1));
    setPrinter(worsts, map, data, data_real, printer);
    fprintf('===== END worsts ====== \n');
    
    %}
end

% Prints set characterization and its branches.
% set row layout: [branch_id , set_size , miss_count]; map maps each
% trace row to its set ID; data_pred holds the rows handed to printer,
% data_real the real-outcome column used for the behavior analysis.
function setPrinter( set , map , data_pred , data_real , printer )
    % for each branch in the set
    for i=1:size(set,1)
        fprintf('set #%d [size=%2d][miss=%.3f %d/%d]\n',set(i,1),set(i,2),set(i,3)/set(i,2),set(i,3),set(i,2));

        branch = set(i,1);
        % set's indexes in the trace
        indexes = find( map == branch );
        branches = data_real( indexes, :);
        [ warmup burst_number burts_flip transition_length taken nottaken] = setAnalysis( branches );

        fprintf('warm-up time: %d\n',warmup );

        % filter out the -1 sentinel ("not available") before averaging
        bursts = burst_number( burst_number > -1 );
        fprintf('avg. stream burst: %2.3f\n',sum(bursts)/size(bursts,1));

        flips = burts_flip( burts_flip > -1 );
        fprintf('avg. burts flips : %2.3f\n',sum(flips)/size(flips,1));

        transitions = transition_length( transition_length > -1 );
        fprintf('avg. trans. leng.: %2.3f\n',sum(transitions)/size(transitions,1));
        fprintf('num. trans. : %d\n',size(transitions,1));
        fprintf('taken: %2d \t / not taken %2d\n',taken,nottaken);

        % prints sets characteristics
        % FIX: the printer handle takes (misses, printme); the original
        % single-argument call errored with "not enough input arguments".
        % Pass true so the classification is actually printed.
        fprintf('-- classification: --\n');
        printer( data_pred( indexes, : ) , true );
        fprintf('-----------\n');
    end
end

% burst analysis for a behavior, should only be used in a single set if it
% is used to yeal a set's burst analysis...
% use set's REAL branch behavior
% set should be of the form: [ 0 1 0 0 0 ... 1 ]' (n-rows,1-column)
%
% Returns (scalars; -1 means "not available"):
%   warmup       - position where a saturating +/-1 sum first reaches
%                  WARMUP_SUM in magnitude (-1 if it never does)
%   burst_number - total elements inside bursts (runs >= BURST_THRESHOLD)
%   nflips       - number of direction changes between consecutive bursts
%   noise        - number of runs too short to count as bursts
%   taken        - count of 1s in set_behavior
%   nottaken     - count of 0s in set_behavior
function [ warmup burst_number nflips noise taken nottaken ] = ...
    setAnalysis( set_behavior )

    WARMUP_SUM = 3; % 2-bit counter only, (N-bit + 1)
    BURST_THRESHOLD = 2; % only a burst if at least 2

    warmup = -1;
    burst_number = -1;
    burts_flip = -1; % NOTE(review): assigned but never used below — confirm dead
    noise = -1;
    nflips = 0;
    
    w = 0;           % running +/-1 sum used for warm-up detection
    last = -1;       % previous outcome (-1 = none yet)
    count = 0;       % length of the current run of equal outcomes
    last_burst = -1; % direction of the previous burst (-1 = none yet)
    flips = 0;       % bursts whose direction differs from the previous burst
    bursts = 0;      % total elements inside bursts
    n_bursts = 0;    % number of bursts

    transition = 0;   % runs too short to be bursts ("noise")
    n_transition = 0; % transitions counter (bursts + trailing short run)

    taken = 0;
    nottaken = 0;

    % the set is not significantly large
    if( size(set_behavior,1) < 2 )
        return;
    end

    for p=1:size(set_behavior,1)

        b = set_behavior(p,1);
        if( b == 0 )
            nottaken = nottaken + 1;
        else
            taken = taken+1;
        end

        % WARM UP %
        % only if not warmed up yet.
        if( abs(w) < WARMUP_SUM )
            % warm up : sum of taken/not taken is greater than N-bit +1
            if( b == 0 )
                w = w + -1;
            else
                w = w + 1;
            end
            if( abs(w) >= WARMUP_SUM )
                % ok, it has warmed...
                warmup = p;
            end
        end

        % BURST / TRANSITION %
        % last value forced to count as "break" to count lasts
        if( last == b && p~=size(set_behavior,1) )
            count = count+1;
        else
            if( last == -1 )
                % first element: start the first run
                count = 1;
            else
                if( last == b ) %p==size(set_behavior,1)
                    count = count+1;
                end

                if( count >= BURST_THRESHOLD )
                    % run is long enough to be a burst
                    bursts = bursts + count;
                    n_bursts = n_bursts + 1;
                    if( last_burst ~= -1 && last ~= last_burst )
                        flips = flips + 1;
                    end
                    last_burst = last;
                    n_transition = n_transition+1;
                else
                    % run too short: counts as noise
                    transition = transition+1;

                    % if about to end the set, count this as a transition
                    if( p==size(set_behavior,1) )
                        n_transition = n_transition+1;
                    end
                end
                count = 0;
            end
            last = b;
        end
    end

    burst_number = bursts;%/size(set_behavior,1);
    nflips = flips; %/n_bursts;
    noise = transition; %/n_transition;
end
    
% classify: partition every branch present in the trace 'file' into one of
% the behavioral classes listed under KINDS below, print the distribution,
% and return a handle ('printer') that re-prints the class distribution for
% an arbitrary subset of trace rows (callers use it to classify misses).
function printer = classify( file )

    % index of useful columns in 'file'
    TARGET_COLUMN = getIndex(file.textdata,'target');
    BRANCH_COLUMN = getIndex(file.textdata,'branch');
    REAL_COLUMN = getIndex(file.textdata,'real');
    
    % getIndex returns -1 for a missing header
    assert( TARGET_COLUMN ~= -1 );
    assert( BRANCH_COLUMN ~= -1 );
    assert( REAL_COLUMN ~= -1 );
    
    % identify branches (unique branch identifiers appearing in the trace)
    data = file.data;
    branches = unique(data(:,BRANCH_COLUMN));
    
    % largest circular shift tried by the COMPLEX test below
    MAX_COMPLEX_SHIFTS = 32;
    
    % KINDS (class ids stored in the 'kinds' vector)
    ALWAYS_KIND = 1;            % taken on every execution (>1 executions)
    NEVER_KIND = 2;             % not taken on every execution (>1 executions)
    LOOP_KIND = 3;              % target PC below branch PC (backward jump)
    STRICT_KIND = 4;            % fixed-length runs of taken / not taken
    BLOCK_KIND = 5;             % like STRICT, but the first run was truncated
    COMPLEX_KIND = 6;           % some circular shift equals the complement
    SINGLE_TAKEN_KIND = 7;      % executed exactly once, taken
    SINGLE_NOTTAKEN_KIND = 8;   % executed exactly once, not taken
    ALTERNATING = 9;            % STRICT with run length 1 on both sides
    OTHERS_KIND = 0; % default is others
            
    fprintf('-------- CLASSIFICATION -------- \n');
    
    fprintf('classifying: %d\n',size(branches,1));
    
    kinds = classification( branches );
    printKinds( kinds , true );
    
    % handle returned to the caller: maps a subset of rows ('misses') to the
    % classes of their branches and prints the counts when printme is true
    printer = @(misses,printme)printKinds(wrongs_classification( misses , kinds, branches ),printme);
    
    % Print per-class counts and percentages of the class-id vector 'kinds'.
    % Returns the number of distinct classes present (nclasses) and a flag
    % (nconf) that is true when the mix is "confusable": a taken-only and a
    % not-taken-only class coexist, or 'others' coexists with a structured
    % class (block/strict/loop/always/never).
    function [nclasses nconf] = printKinds( kinds , printme )
        
        % per-class population counts
        one_tak = length( kinds( kinds == SINGLE_TAKEN_KIND , : ) );
        one_not = length( kinds( kinds == SINGLE_NOTTAKEN_KIND , : ) );
        always  = length( kinds( kinds == ALWAYS_KIND , : ) );
        never   = length( kinds( kinds == NEVER_KIND  , : ) );
        loop    = length( kinds( kinds == LOOP_KIND   , : ) );
        strict  = length( kinds( kinds == STRICT_KIND , : ) );
        block   = length( kinds( kinds == BLOCK_KIND  , : ) );
        pattern = length( kinds( kinds == COMPLEX_KIND , : ) );
        altern  = length( kinds( kinds == ALTERNATING , : ) );
        others  = length( kinds( kinds == OTHERS_KIND , : ) );
        total   = length( kinds );
        
        % number of distinct classes that actually occur
        nclasses = (one_tak>0)+(one_not>0)+(always>0)+(never>0)+(loop>0)+...
            (strict>0)+(block>0)+(pattern>0)+(altern>0)+(others>0);
        
        
        nconf = (((one_tak>0)||(always>0)) && ((one_not>0)||(never>0))) ...
        || ( (others>0) && ( (block>0) || (strict>0) || (loop>0) || (always>0) || (never>0) ) );
        
        if( printme == true )
            fprintf('one\t%d\t%.2f\n', one_tak, one_tak/total*100 );
            fprintf('zero\t%d\t%.2f\n', one_not, one_not/total*100 );
            fprintf('always\t%d\t%.2f\n', always, always/total*100 );
            fprintf('never\t%d\t%.2f\n', never, never/total*100 );
            fprintf('loop\t%d\t%.2f\n', loop, loop/total*100 );
            fprintf('alternating\t%d\t%.2f\n', altern, altern/total*100 );
            fprintf('strict\t%d\t%.2f\n', strict, strict/total*100 );
            fprintf('block\t%d\t%.2f\n', block, block/total*100 );
            fprintf('complex\t%d\t%.2f\n', pattern, pattern/total*100 );
            fprintf('others\t%d\t%.2f\n', others, others/total*100 );
            fprintf('total=%d\n',total);
        end
        
        % sanity: every branch fell into exactly one class
        assert( total == altern+one_tak+one_not+always+never+loop+strict+block+pattern+others);
    end

    % AUX FUNCTION DEFINITIONS %
    
    % how many times it missed on a specific class...
    % Maps each row of 'misses' to the class id of its branch so the result
    % can be fed straight to printKinds. Each missed branch must appear
    % exactly once in 'branches' (asserted below).
    function wrongs = wrongs_classification( misses , kinds, branches )
        wrongs = zeros( size(misses,1) , 1 );
        
        for i=1:size(misses,1)
            branch = misses(i,BRANCH_COLUMN);
            index = find( branches == branch );
            assert( size(index,1) == 1 );
            wrongs(i,1) = kinds(index,1);
        end

    end
    
    % Classify each branch: returns a column of class ids (one per entry of
    % 'branches', same order) using the KINDS constants defined above.
    % Tests are tried in order; the first match wins, otherwise the entry
    % stays 0 == OTHERS_KIND.
    function kinds = classification( branches )
        kinds = zeros(size(branches,1),1);
        
        for i=1:size(branches,1)
            branch = branches(i,1);
            % fetch all rows with this branch
            % column 2 is 'branch' column
            branch_data = data( data(:,BRANCH_COLUMN) == branch , : );
            
            % SINGLE TAKEN / NOT %
            % branch executed exactly once in the whole trace
            if( size(branch_data,1) == 1 )
                p = branch_data(:,REAL_COLUMN);
                if( p == 1 )
                    kinds(i,1) = SINGLE_TAKEN_KIND;
                    continue;
                else
                    kinds(i,1) = SINGLE_NOTTAKEN_KIND;
                    continue;
                end
            end
            
            % ALWAYS / NEVER %
            
            s = sum( branch_data(:,REAL_COLUMN) );
            if( s == 0 )
                % only contains 0s, thus "not taken"
                kinds(i,1) = NEVER_KIND;
                continue;
            end
            
            if( s == size(branch_data,1) )
                % same size as number of rows of data, thus "always taken"
                kinds(i,1) = ALWAYS_KIND;
                continue;
            end
            
            % LOOP %
            
            % they should all have the same target, thus only look at first
            if( branch_data(1,TARGET_COLUMN) < branch )
                % jump to a PC lower than branch's PC, thus "loop"
                kinds(i,1) = LOOP_KIND;
                continue;
            end
            
            % STRICT / BLOCK %
            % Run-length scan of the outcome stream. STRICT means every run
            % of taken outcomes has one fixed length and every run of
            % not-taken outcomes has one fixed length. BLOCK tolerates one
            % exception: the very first run may have been cut short (the
            % trace starts mid-run), detected at the second switch (sw==2).
            
            nottaken = -1;      % established not-taken run length (-1 = unset)
            taken = -1;         % established taken run length (-1 = unset)
            
            n = 0;              % length of the current run
            last = -1;          % previous outcome (-1 = none seen yet)
            sw = 0;             % number of outcome switches observed
            
            strict_test = true;
            block_test = false;
            
            for j=1:size(branch_data,1)
                pred = branch_data(j,REAL_COLUMN);
                
                % on a value switch
                if( last ~= -1 && pred ~= last )
                    sw = sw + 1;
                    
                    % set initial values, if needed
                    if( taken == -1 && last == 1 )
                        taken = n;
                    end
                    if( nottaken == -1 && last == 0 )
                        nottaken = n;
                    end
                    
                    % if numbers don't match with previously observed
                    if( ( last == 1 && taken ~= n ) || ( last == 0 && nottaken ~= n ) )
                        
                        % not a strict block pattern
                        strict_test = false;
                        
                        % maybe the first block was cut?
                        % this is only valid if we are replacing with a larger
                        % block size, never smaller...
                        if( sw == 2 && ((last==1 && n>taken )||(last==0 && n>nottaken) ) )
                            block_test = true;
                            if( last == 1 )
                                taken = n;
                            else
                                nottaken = n;
                            end
                        else
                            % nope, not cut. just not a block
                            block_test = false;
                            break;
                        end
                    end
                    
                    % reset block counter
                    n = 0;
                end
                
                n = n+1;
                last = pred;
            end
            
            % remove those that break the limit eventhough they don't switch at
            % the end of their cycle
            % (the final run never reaches the switch check inside the loop,
            % so it has to be validated here)
            if( strict_test )
                if( last == 0 )
                    if( nottaken == -1 )
                        nottaken = n;
                    else
                        if( n > nottaken )
                            strict_test = false;
                            
                            % maybe non-strict?
                            if( sw == 2 )
                                nottaken = n;
                                block_test = true;
                            else
                                block_test = false;
                            end
                        end
                    end
                else % last == 1
                    if( taken == -1 )
                        taken = n;
                    else
                        if( n > taken )
                            strict_test = false;
                            
                            % non-strict?
                            if( sw == 2 )
                                taken = n;
                                block_test = true;
                            else
                                block_test = false;
                            end
                        end
                    end
                end
            end
            
            if( strict_test )
                % run length 1 on both sides is the special alternating case
                if( taken == 1 && nottaken == 1 )
                    kinds(i,1) = ALTERNATING;
                    continue;
                end
                kinds(i,1) = STRICT_KIND;
                continue;
            end
            if( block_test )
                kinds(i,1) = BLOCK_KIND;
                continue;
            end
            
            % COMPLEX %
            % look for a circular shift j (up to MAX_COMPLEX_SHIFTS) at which
            % the shifted outcome sequence is the elementwise complement of
            % the original (xor is true at every position)
            
            behavior = branch_data(:,REAL_COLUMN);
            max = size(behavior,1); % NOTE(review): shadows builtin max() within this loop body
            
            complex = false;
            for j=1:min( max-1, MAX_COMPLEX_SHIFTS )
                shift = circshift(behavior,j);
                if( sum( xor(shift,behavior) ) == max )
                    complex = true;
                    break;
                end
            end
            if( complex )
                kinds(i,1) = COMPLEX_KIND;
                continue;
            end
            
            % no test matched: kinds(i,1) stays 0 == OTHERS_KIND
        end
    end

end


% finds the index of the column of M whose header (first row) equals str
% (exact strcmp match, not substring); returns -1 when no header matches.
%   M   : cell array of strings, headers in row 1 (e.g. file.textdata)
%   str : header name to look for
function res = getIndex(M,str)
    % scan only the first row. The previous version looped i=1:length(M)
    % while indexing M(1,i); length() returns the LARGEST dimension, so a
    % cell array with more rows than columns would overrun the header row.
    res = find( strcmp( M(1,:) , str ) , 1 );
    if( isempty(res) )
        % keep the -1 sentinel callers assert against
        res = -1;
    end
end
