#!/usr/bin/env perl
# ############################################################################
#                                  check.perl
#
# This script is intended to shed some light on the input features of
# classifiers, and help to identify their relative values, and potential
# pitfalls associated with them.
# The input format is plain SNNS training file format, without comment lines,
# without header.
#
# Version history:
#  v0.1  -  basic version.
#           Creator: Thomas Kemp
#           Date: 24. July 2002
#           Description: see above.
#
# ############################################################################


# Global verbosity level of the diagnostic output: 0 = essentials only,
# higher values (up to 3) print progressively more detail.
$Verbose = 2;

# integral (-infty,x) of N(0,1)
# after this, there is a global variable $Multiplier
#  and an array $F[..]
#  suppose you need the integral from minus infinity to 0.78 of the standard
#  normal density. Then, multiply the 0.78 times $Multiplier,
#  resulting in e.g. 780 for the case of $Multiplier being 1000,
#  and look in $F[780] (which should be 0.782305 according to
#  the tabulated value in [Bamberg/Baur])
#  if you need the integral from 0 to x, just subtract 0.5 from the
#  integral value from -infty to x
#
# Build a lookup table for the CDF of the standard normal distribution.
# Side effects (globals):
#   $Multiplier - table resolution: table index = int($Multiplier * x)
#   @F          - $F[$i] is the integral of N(0,1) from -infinity to
#                 $i / $Multiplier, tabulated for x in [0, 5).
# The integral is accumulated with the trapezoidal rule.
# Example: $F[780] should be ~0.782305, the tabulated CDF value at x = 0.78.
sub init_find_highbound {
  # Lexical temporaries ("my") instead of the old dynamically scoped
  # "local" list; $t is initialized explicitly instead of relying on an
  # undef value numifying to 0.
  my ($x0, $x1, $v);
  my $t = 0;                                # table index being filled
  $Multiplier = 1000;
  my $step = 1.0 / $Multiplier;

  my $sqr2pi = 1.0 / sqrt(2.0 * 3.1415926535);  # 1/sqrt(2*pi), N(0,1) norm factor
  my $intval = 0.500;                       # CDF at x = 0 is exactly 1/2
  for (my $i = 0.0; $i < 5.0; $i += $step) {
    $F[$t] = $intval;
    # Trapezoidal step: average the density at both ends of the interval.
    $x0 = exp(-$i * $i / 2.0);
    $v = $i + $step;
    $x1 = exp(-$v * $v / 2.0);
    $intval += ($x0 + $x1) / 2.0 * $step * $sqr2pi;
    $t++;
  }
}

# this subroutine will solve the following problem.
#  Given a number x1, find a number x2 so that the integral over
#  the standard normal distribution, taken between x1 and x2, is
#  just the value of $prob.
#  Of course, if you select $prob > 1, this is impossible. Also, for
#  some values of x1 it may be the case that the remaining probability
#  mass under the standard normal is insufficient to yield the given
#  probability $prob. In such cases, the subroutine will return the
#  special value '1e30' meaning plus infinity.
#  find_highbound assumes the global variables $Multiplier and
#  $F[] which are set by init_find_highbound().
# Given a lower bound $lowbound (in standard deviations) and a probability
# mass $prob, return the upper bound x2 so that the integral of the
# standard normal density over ($lowbound, x2) equals $prob.
# Returns 1e30 ("plus infinity") when the remaining probability mass above
# $lowbound is smaller than $prob (e.g. when $prob >= 1).
# Relies on the globals $Multiplier and @F set up by init_find_highbound();
# the table is built lazily on first use.  All working variables are now
# lexical ("my"): the old version used package globals such as $t, which
# the calling code also uses as a loop variable.
sub find_highbound {
  my ($lowbound, $prob) = @_;
  my $infty = 1e30;
  if ($Multiplier == 0) { init_find_highbound(); }  # lazy table construction
  if ($prob >= 1.0) { return $infty; }

  # Probability mass from -infinity to $lowbound, read from the table.
  my $lowboundI = int($Multiplier * $lowbound);
  my $lowboundP;
  if ($lowboundI <= -$#F) {
    $lowboundP = 0.0;                     # low bound close to minus infinity
  } elsif ($lowboundI < 0) {
    $lowboundP = 1.0 - $F[-$lowboundI];   # use the symmetry of N(0,1)
  } else {
    $lowboundP = $F[$lowboundI];
  }

  # Desired probability mass from -infinity to the upper bound.
  my $highboundP = $lowboundP + $prob;
  if ($highboundP >= 1.0) { return $infty; }

  if ($highboundP < 0.5) {
    # The end point will be smaller than zero: search the mirrored value.
    $highboundP = 1.0 - $highboundP;
    for (my $t = 0; $t < $#F; $t++) {
      if ($F[$t] >= $highboundP) { return (-$t / $Multiplier); }
    }
    return (-$#F / $Multiplier);
  } else {
    # The end point will be larger than (or equal to) zero.
    for (my $t = 0; $t < $#F; $t++) {
      if ($F[$t] >= $highboundP) { return ($t / $Multiplier); }
    }
    return $infty;
  }
}

# Print a short command-line usage summary on stdout.
sub usage {
  my $text = "Usage: [perl] <this script> <name of feature file in SNNS format> <column used as output>\n";
  print $text;
  return;
}
# Sort comparators for numeric ascending / descending ordering
# (for use as "sort numerically @list"; $a/$b are the sort globals).
sub numerically            { $a <=> $b }
sub numerically_decreasing { $b <=> $a }

# --- command line handling --------------------------------------------------
# The script needs two arguments: the SNNS-format feature file and the index
# of the column holding the class tag.  The old check ($#ARGV < 0) accepted
# a single argument and then silently used an undefined tag column.
if ($#ARGV < 1) { usage(); die; }

$filename = $ARGV[0];
$Tagcolumn = $ARGV[1];

# Three-argument open avoids mode injection through the filename.
open(FH, '<', $filename) || die "Could not open $filename\n";

# First pass over the training file: count lines, count the occurrences of
# every distinct tag value, and accumulate per-column sum and sum-of-squares
# (the tag column itself is accumulated along with the features).
$n = 0;
while ($line = <FH>) {
  $line =~ s/\n//;
  @items = split / /, $line;
  $tag = $items[$Tagcolumn];          # the training target, class tag
  $tagFA{$tag}++;
  foreach $col (0 .. $#items) {
    $sum[$col] += $items[$col];
    $sos[$col] += $items[$col] * $items[$col];
  }
  $n++;
}

close(FH);

# All distinct tag values seen in the tag column.
@allkeys = keys %tagFA;
$nkeys = @allkeys;                    # scalar context: number of distinct tags

# Verbosity-gated summary of the first pass.
if ($Verbose > 0) {
  print "\nBASIC STATISTICS\n";
  print "================\n";
  print "File: $filename\n";
  print "Have $n lines with $#items elements (excluding the tag column) each\n";
  print "There are $nkeys different tags in the tag column.\n" if $Verbose > 1;
  print " Here are the different tags: @allkeys \n" if $Verbose > 2;
  print "Here are the means and variances of all features (and all tag values):\n" if $Verbose > 1;
}

# Global ("all tags") mean and variance per feature column.
# NOTE(review): this loop runs $i < $#items, i.e. it skips the LAST column,
# apparently assuming the tag column is the last one -- the accumulation
# loop above included every column.  Verify against $Tagcolumn if the tag
# is not in the last column.
for ($i=0; $i < $#items; $i++) {
  $mean[$i] = $sum[$i] / $n;
  # population variance: E[x^2] - (E[x])^2
  $variance[$i] = $sos[$i]/$n - $mean[$i]*$mean[$i];
  # "variance mean column" record; a numeric sort on these strings orders
  # them by the leading variance value
  $joint[$i] = "$variance[$i] $mean[$i] $i";
  $Mean{"all,$i"} = $mean[$i];
  $Variance{"all,$i"} = $variance[$i];
  if ($Verbose > 1) {print "Feature $i: mean $mean[$i], variance $variance[$i]\n";}
}
$N{"all"} = $n;

# 1st thing to do: check for identical lines
#  identical feature columns have identical variances, so sort the
#  "variance mean column" records by variance and compare neighbours
@sorted = sort numerically @joint;
for ($i=0; $i < $#items; $i++) {
  @words = split(/ /,$sorted[$i]);
  $sortvar[$i] = $words[0];
}

print "\n-------------------------------------------------------------------------\n";
print "Checking for irrelevant features\n";
print "================================\n";

for ($i=0; $i < $#items-1; $i++) {
  # Guard against a zero variance (constant feature): the old code divided
  # by $sortvar[$i+1] unconditionally and died with "Illegal division by
  # zero" whenever a constant column was present.
  if ($sortvar[$i+1] == 0) {
    # two zero variances are trivially identical -> report; otherwise skip
    $reldiff = ($sortvar[$i] == 0) ? 0 : 1;
  } else {
    $reldiff = ($sortvar[$i+1] - $sortvar[$i]) / $sortvar[$i+1];
  }
  if ( $reldiff < 0.001 ) {
    # relative difference less than 1/10%
    @words = split(/ /,$sorted[$i]);
    @words2 = split(/ /,$sorted[$i+1]);
    print "Suspiciously close variances between features $words[2] and $words2[2] :";
    print $reldiff; print "\n";
  }
}

print "\n-------------------------------------------------------------------------\n";

# Per-tag statistics.  For every distinct tag value the file is re-read
# from disk and per-column sum / sum-of-squares are accumulated over the
# lines carrying that tag only.
# NOTE(review): this re-reads the input file once per tag -- fine for a
# two-class problem, slow for many classes.
# Globals filled in here:
#   $N{$Tag}              - number of lines carrying this tag
#   $Mean{"$Tag,$i"}      - per-tag mean of column $i
#   $Variance{"$Tag,$i"}  - per-tag (population) variance of column $i
#   $ItemA{"$Tag,$n,$i"}  - raw data values, keyed by tag, row index, column
for ($tagI = 0; $tagI <= $#allkeys; $tagI++) {
  $Tag = $allkeys[$tagI]; # typically, 0.000 or 1.000
  $n = 0; for ($i=0; $i <= $#items; $i++) { $sum[$i] = 0; $sos[$i] = 0; } ; # clear accumulator
  if ($Verbose > 0) { print "Analyzing tag $Tag "; }
  open(FH,$filename);
  while ($line = <FH>) {
    $line =~ s/\n//;
    @items = split(/ /,$line);
    $tag = $items[$Tagcolumn]; # the training target, class tag
    # numeric comparison, so "0.000" and "0" count as the same tag
    if ($tag == $Tag) {
      for ($i=0; $i <= $#items; $i++) {
	$sum[$i] += $items[$i];
	$sos[$i] += $items[$i] * $items[$i];
	$ItemA{"$Tag,$n,$i"} = $items[$i]; # all data - ItemA{Tag,i,columnX} where i = 0...$N{$Tag}
      }
      $n++;
    }
  }
  close(FH);
  # OK, statistics for this tag are now here
  #  fill global info arrays $N, $Mean, $Variance
  #
  $N{$Tag} = $n;
  if ($Verbose > 0) { print "($n occurrences)\n"; }
  # NOTE(review): unlike the "all tags" loop above, this one runs up to
  # $#items INclusive, so the tag column itself also gets a per-tag mean
  # and a (zero) variance -- confirm this asymmetry is intended.
  for ($i=0; $i <= $#items; $i++) {
    $Mean{"$Tag,$i"} = $sum[$i] / $n;
    $Variance{"$Tag,$i"} = $sos[$i]/$n - $sum[$i]/$n * $sum[$i]/$n;
    if ($Verbose > 2) { 
      $m = $Mean{"$Tag,$i"}; $v = $Variance{"$Tag,$i"};
      print "Tag $Tag, feature $i: mean $m variance $v\n";
    }
  }
  
}
  
# for the 2-class case I can compute the empirical Bayes Error
#  this is just done by finding, per feature column, the best single
#  threshold separating the two classes
if ($#allkeys == 1) {
  $Tag1 = $allkeys[0]; # typically, 0.000 or 1.000
  $Tag2 = $allkeys[1]; # typically, 1.000 or 0.000

  # Chance performance of always answering the more frequent class.
  if ( $N{$Tag1} < $N{$Tag2} ) {
    $baseline = $N{$Tag2} / ( $N{$Tag1} + $N{$Tag2} );
  } else { 
    $baseline = $N{$Tag1} / ( $N{$Tag1} + $N{$Tag2} );
  }
  print "Analyzing estimated Bayes Error for all features\n";

  # now, go thru all columns, one by one
  for ($columnX=0; $columnX < $#items; $columnX++) {
    $|=1; print "$columnX ";   # unbuffered progress indicator
    # Collect this column's values separately per class, then sorted,
    # plus the sorted union used as the candidate threshold set.
    for ($i=0; $i < $N{$Tag1}; $i++) {
      $class1[$i] = $ItemA{"$Tag1,$i,$columnX"};
    }
    for ($i=0; $i < $N{$Tag2}; $i++) {
      $class2[$i] = $ItemA{"$Tag2,$i,$columnX"};
    }
    @class1sorted = sort numerically @class1;
    @class2sorted = sort numerically @class2;
    @class3 = (@class1,@class2);
    @class3sorted = sort numerically @class3;

    $idx1 = 0; $idx2 = 0;
    # run thru all candidate thresholds (every data point) and evaluate:
    $maxperformance = 0.0;
    foreach $key (@class3sorted) {
      # Advance $idx1/$idx2 to the first element >= the threshold $key.
      # Bounded while conditions replace the old "goto l1"/"goto l2"
      # jumps (which bailed out after the index ran past the array end).
      if ($idx1 <= $#class1sorted) {
        while ($idx1 <= $#class1sorted && $class1sorted[$idx1] < $key) { $idx1++; }
      }
      if ($idx2 <= $#class2sorted) {
        while ($idx2 <= $#class2sorted && $class2sorted[$idx2] < $key) { $idx2++; }
      }
      # class1 assumed lower: with this threshold $idx1 entries of class1
      # fall below it (correct) and $idx2 entries of class2 do too
      # (incorrect); the inverted assignment is handled below.
      $correct = $idx1 + 1 + $N{$Tag2} - $idx2 - 1;
      $thisperformance = $correct / ($N{$Tag1} + $N{$Tag2});
      # a threshold worse than chance can simply be inverted
      if ($thisperformance < 0.5) { $thisperformance = 1.0 - $thisperformance; }
      if ($thisperformance > $maxperformance) { $maxperformance = $thisperformance; $bestkey = $key; }
    }
    # "performance column" record, numerically sortable by performance
    $Performance[$columnX] = "$maxperformance $columnX";
  }
  print "\n\n\nFeatures, ordered by increasing estimated Bayes Error:\n";
  print "======================================================\n";
  @perf_sorted = sort numerically_decreasing @Performance;
  $baseline = (1.0 - $baseline) * 100.0;   # convert to an error rate in percent
  printf("\nBaseline error rate: %6.3f%%\n\n", $baseline);
  foreach $entry (@perf_sorted) {
    ($perf,$columnX) = split(/ /,$entry);
    $perf = 100.0 * (1.0 - $perf);         # performance -> error rate in percent
    if ($columnX != $Tagcolumn) {
      if ( 1.0001*$perf >= $baseline) {
	printf("Column %3d - Bayes Error %6.3f%% [does not beat the baseline!]\n", $columnX, $perf); 
      } else { 
	printf("Column %3d - Bayes Error %6.3f%% (%g +- %g / %g +- %g)\n", $columnX, $perf, $Mean{"$Tag1,$columnX"},sqrt($Variance{"$Tag1,$columnX"}),$Mean{"$Tag2,$columnX"},sqrt($Variance{"$Tag2,$columnX"}));
      }
    }
  }
}




print "Checking normality assumption\n";
print "=============================\n";


# For the test [Bamberg/Baur pp 199 ff] I need at least 4 intervals
#  for the normal distribution test, and at least 3 for the poisson distribution
#  this is because the test distribution is chi^2 of (n - #paramsOfDistribution - 1)
#  and the normal has 2 params, and I need at least chi^2(1)
#  so go for 4 boxes, or more boxes but have at least 5 entries in each box
#  the 5 is a necessary prerequisite for the accuracy of the test ([Bamberg/Baur, p 200])
$minnum = 5;  # minimum expected entries per chi^2 bin

print "Step 1 - normalizing all data to be N(0,1) distributed\n";
# Normalize every stored data point IN PLACE to a z-score,
#   x -> (x - mean) / stddev    (per tag and per column),
# so the chi^2 goodness-of-fit test below can compare each column against
# the standard normal N(0,1).  %ItemA is destructively updated here.
foreach $Tag (@allkeys) {
  print "Tag: $Tag, processing column ";
  # need at least 4 bins with $minnum entries each for the chi^2 test
  if ( $N{$Tag} < 4 * $minnum ) {
    print "\nWARNING! For tag $Tag, there are not enough data points (only $N{$Tag}) to check the distribution assumption!\n";
  } else {
    for ($columnX = 0; $columnX < $#items; $columnX++) {
      print "$columnX ";
      # a zero-variance (constant) column cannot be normalized; warn and skip
      if ($Variance{"$Tag,$columnX"} == 0) {
	print "\nWARNING! For tag $Tag and column $columnX, the variance of the feature is zero!\n";
      } else {
	for ($t=0; $t < $N{$Tag}; $t++) {
	  $ItemA{"$Tag,$t,$columnX"} = ($ItemA{"$Tag,$t,$columnX"} - $Mean{"$Tag,$columnX"}) / sqrt($Variance{"$Tag,$columnX"});
	  #print "normalize tag $Tag, t $t, columnX $columnX to "; print $ItemA{"$Tag,$t,$columnX"}; print "\n";
	}
      } # if the variance of the feature is nonzero
    } # for all columns
  } # if there is enough data
  print "\n";
} # foreach $Tag
  
print "Step 2 - find the bins\n";
# Chi^2 goodness-of-fit test against N(0,1): partition the real line into
# bins of equal probability mass $binsizeP (via find_highbound), count the
# normalized data points per bin, and accumulate
#   chi = sum_bins (observed - expected)^2 / expected.
# NOTE(review): columns that were skipped in step 1 because of zero
# variance are still processed here with UN-normalized data -- their chi^2
# result is meaningless; confirm whether they should be skipped as well.
foreach $Tag (@allkeys) {
  $n = $N{$Tag};
  if ( $n < 4 * $minnum ) {
    print "\nWARNING! For tag $Tag, there are not enough data points (only $N{$Tag}) to check the distribution assumption!\n";
  } else {
    $binsizeP = $minnum / $n; # this is a probability
    if ($binsizeP < 0.033) { $binsizeP = 0.033; } # more than 30 bins... make them 30 (reduce computation)
    $expect = $binsizeP * $n; # this many I expect in each bin
    for ($columnX = 0; $columnX < $#items; $columnX++) {
      $chi = 0.0;
      $lowbound = -1e30; # minus infinity :-)
      $highbound = 0; # just something to cause the while loop to be entered (no do... loop in perl)
      # walk the bins left to right until find_highbound signals "+infinity"
      while ($highbound < 1e30) {
	$highbound = find_highbound($lowbound,$binsizeP);
	# the bin is found... check how many entries we have inside that bin!
	$v = 0;
	for ($t=0; $t < $n; $t++) {
	  # count points in the half-open interval ($lowbound, $highbound]
	  if ($ItemA{"$Tag,$t,$columnX"} > $lowbound) {
	    if ($ItemA{"$Tag,$t,$columnX"} <= $highbound) { $v++; }
	  }
	}
	# $v is the number of entries inside the bin.
	$chi = $chi + ($v - $expect)*($v-$expect)/$expect;
	$lowbound = $highbound;
      }
      # degrees of freedom: #bins - 1 - 2 estimated parameters (mean, var)
      $chidim = int(1.0/$binsizeP)-1-2;
      print " Tag $Tag ($n data points). Column $columnX. Result = $chi, compare this to tabled value of chi^2($chidim) for the desired significance level\n";
    }
    
  }
}      



      



exit;
