#!/usr/bin/perl
#Programmer: Rory Carmichael
#Purpose: Create shuffled versions of a given cluster alignment
#Usage: rc-shuffle-clust.pl <cumulative?(y/n/a)> <comparison_method> <count> <weight> <input cluster alignment> 

# NOTE(review): this script runs without `use strict; use warnings;`.  Several
# subs below rely on undeclared package globals ($n, $onesum, %agg_hist), so
# strictures cannot be enabled without wider changes.
use List::Util qw[min max];
use Math::Complex;
# NOTE(review): Switch.pm is a deprecated source filter (removed from core
# perl); compare_hists() is its only user.
use Switch;

# 'y' = also compute statistics on cumulative histograms,
# 'a' = make every histogram cumulative up front, anything else = neither.
my $cumulative = shift;
# Name of an interestingness measure (or "all"); matched by regex in compare_hists().
my $comparison_method = shift;
my @thing = ();            # per-organism tab-joined cluster tokens (holds/gaps removed)
my @newthing = ();         # most recent shuffled matrix
my @holds = ();            # per-organism countdown of positions covered by the current cluster
my @shuffle_lengths = ();  # per-organism number of shufflable tokens
my $max = 0;               # number of alignment columns read
my %first_hist;            # overlap-depth histogram of the ORIGINAL alignment
my @gaptrix = ();          # [organism][column]: "-" for a gap, " " otherwise
my $count = shift;         # number of shuffles to perform
my $fname = $ARGV[0];      # input file name (used only to label output)
my $org_count = 0;         # number of organisms (rows); set after read_file()
#Variables added to permit end-position de-emphasis
my $margin_size = 50;      # bases at the start of a sequence that get reduced weight
my $margin_weight = shift; # weight applied to overlaps inside the margin
my @ungaplen;              # per-organism ungapped sequence length
# Return the numerically largest element of the argument list, or undef for
# an empty list.
sub a_max {
	my $best;
	foreach my $candidate (@_) {
		$best = $candidate if !defined($best) || $candidate > $best;
	}
	return $best;
}

# Return the numerically smallest element of the argument list, or undef for
# an empty list.
sub a_min {
	my $best;
	foreach my $candidate (@_) {
		$best = $candidate if !defined($best) || $candidate < $best;
	}
	return $best;
}

# Quickselect: return the $k-th smallest element (1-based) of @items using a
# random pivot.  The pivot choice randomizes running time only; the returned
# order statistic is deterministic for 1 <= $k <= scalar(@items).
sub a_select {
	my ($k, @items) = @_;
	my $pivot = $items[int(rand(scalar(@items)))];
	# Partition around the pivot; duplicates of the pivot are kept implicitly
	# in the count below.
	my @smaller = grep { $_ < $pivot } @items;
	my @larger = grep { $_ > $pivot } @items;
	# Number of elements <= the pivot.
	my $not_larger = scalar(@items) - scalar(@larger);
	if (scalar(@smaller) > 0 && $k <= scalar(@smaller)) {
		return a_select($k, @smaller);
	}
	if (scalar(@larger) > 0 && $k > $not_larger) {
		return a_select($k - $not_larger, @larger);
	}
	return $pivot;
}

# Five-number summary (min, Q1, median, Q3, max) of the argument list.
# Quartile ranks are integer fractions of the sample size, resolved with the
# quickselect helper; a one-element sample collapses to that value.
sub box_plot {
	my @data = @_;
	if (scalar(@data) == 1) {
		return ($data[0], $data[0], $data[0], $data[0], $data[0]);
	}
	my $last = $#data;
	my $q1_k = int(scalar(@data) / 4);
	my $med_k = int(scalar(@data) / 2);
	my $q3_k = $med_k + $q1_k;
	# Clamp the upper quartile rank to the last index for small samples.
	$q3_k = $last if $q3_k > $last;
	my $low = &a_min(@data);
	my $high = &a_max(@data);
	my $q1 = &a_select($q1_k, @data);
	my $med = &a_select($med_k, @data);
	my $q3 = &a_select($q3_k, @data);
	return ($low, $q1, $med, $q3, $high);
}

#modified to permit weighting of ends
sub read_file {
	my @ungappos;
	my @lines;
	#we'll read the lines into @lines while counting the length of each sequence
	while (my $line = <>) {
		push(@lines, $line);
		if ($line !~ /[^0-9\s-]/) {
			chomp $line;
			my @splitline = split(/\t/, $line);
			for(my $i=0;$i<=$#splitline;$i++) {
				if($splitline[$i] ne "-") {
					$ungaplen[$i]++;
				}
			}
		}
	}

#edited to use @lines, rather than reading from stream directly (yes we are now holding everything in memory)
#	while (my $line = <>) {
	foreach my $line (@lines) {
		if ($line !~ /[^0-9\s-]/) {
			chomp $line;
			my @splitline = split(/\t/, $line);
			my $ovls = 0;
			for(my $i=0;$i<=$#splitline;$i++) {
				if ($splitline[$i] eq "0") {
					$ungappos[$i]++;
					if($holds[$i] > 0) {
						$holds[$i]--;
						#If we have gotten to the end of the sequence, start using reduced weights
#						if($ungaplen[$i] - $ungappos[$i] > $margin_size) {
						#If we are still within margin bases of the sequence start, use reduced weights
						if($ungappos[$i] > $margin_size) {
							$ovls++;
						} else {
							$ovls+=1*$margin_weight;
						}
					} else {
						$thing[$i] .= $splitline[$i] . "\t";
						$shuffle_lengths[$i]++;
					}
					$gaptrix[$i][$max] = " ";
				} elsif ($splitline[$i] eq "-") {
					$gaptrix[$i][$max] = "-";
				} else {
					$ungappos[$i]++;
					if ($holds[$i] != 0) {
						print STDERR "Something went terribly wrong and we have a cluster starting in a cluster ($lnum,$i)\n";
					} else {
						$thing[$i] .= $splitline[$i] . "\t";
						$shuffle_lengths[$i]++;
						$holds[$i] = $splitline[$i] - 1;
						#If we have gotten to the end of the sequence, start using reduced weights
#						if($ungaplen[$i] - $ungappos[$i] > $margin_size) {
						#If we are still within margin bases of the sequence start, use reduced weights
						if($ungappos[$i] > $margin_size) {
							$ovls++;
						} else {
							$ovls+=1*$margin_weight;
						}
					}
					$gaptrix[$i][$max] = " ";
				}
			}
			$max++;
			$first_hist{$ovls}++;
		}
	}
}

#Helper Functions for measures
# Mean of a histogram passed by reference (value => count).  Returns 0 for an
# empty histogram rather than dividing by zero.
sub hist_mean {
	my $href = shift;
	my %hist = %{$href};
	my $weighted_total = 0;
	my $n = 0;
	while (my ($value, $freq) = each(%hist)) {
		$weighted_total += $value * $freq;
		$n += $freq;
	}
	return $n == 0 ? 0 : $weighted_total / $n;
}

# Numerically largest key of the hash passed by reference; -1 when the hash
# is empty (callers treat -1 as "no data").
sub max_key {
	my $href = shift;
	my $best = -1;
	foreach my $k (keys(%{$href})) {
		$best = $k if $k > $best;
	}
	return $best;
}

# Convert a depth histogram to a reverse-cumulative one: each key's value
# becomes the sum of all counts at that key and above.  Walks down from the
# largest key one unit at a time (keys may be fractional when a fractional
# margin weight is in use, so an integer range would visit the wrong keys).
sub make_cumulative {
	my $href = shift;
	my %hist = %{$href};
	my $running = 0;
	my $depth = max(keys(%hist));
	while ($depth >= 0) {
		$running = $hist{$depth} += $running;
		$depth--;
	}
	return %hist;
}

#Different interestingness measures
sub calc_skew {
	#Not likely to be much good in the non-cumulative mode since bi-modal distributions don't show up as skewed, despite being way more interesting
	my $r = shift;
	my %h = %{$r};
	my $mean = &hist_mean($r);
	my $top_sum = 0;
	my $bottom_sum = 0;
	foreach my $key (keys(%h)) {
		$top_sum += $h{$key}*($key-$mean)**3;
		$bottom_sum += $h{$key}*($key-$mean)**2;
		$n += $h{$key};
	}
	if($bottom_sum == 0 || $n == 0 || $n == 2) { return -1;
	}
	my $top = $top_sum / $n;
	my $bottom = ($bottom_sum/$n)**(3/2);
	my $g1 = $top/$bottom;
	my $G1 = $g1*(sqrt($n*($n-1))/($n-2));
	#Now that I think of it, I'm not sure whether we want sample skewness or the population skewness estimator
	return $G1;
}

# Sample excess kurtosis (G2) of the histogram passed by reference.  Returns
# the sentinel -1 when undefined: zero variance, empty sample, or n == 2/3
# (the G2 correction divides by (n - 2)*(n - 3)).
sub calc_kurtosis {
	my $r = shift;
	my %h = %{$r};
	my $mean = &hist_mean($r);
	my $top_sum = 0;
	my $bottom_sum = 0;
	# FIX: $n was an undeclared package global, so its value accumulated
	# across successive calls and corrupted every result after the first.
	my $n = 0;
	foreach my $key (keys(%h)) {
		$top_sum += $h{$key}*($key-$mean)**4;
		$bottom_sum += $h{$key}*($key-$mean)**2;
		$n += $h{$key};
	}
	# FIX: was bitwise `|` (worked only by accident on 0/1 operands); also
	# guard n == 2 and n == 3, which previously divided by zero below.
	if($bottom_sum == 0 || $n == 0 || $n == 2 || $n == 3) {
		return -1;
	}
	my $top = $top_sum / $n;
	my $bottom = ($bottom_sum/$n)**2;
	my $g2 = $top/$bottom;
	my $G2 = (($n - 1)/(($n-2)*($n-3)))*(($n+1)*$g2+6);
	#Again, not sure whether we want sample kurtosis or population kurtosis estimator
	return $G2;
}

# Smallest overlap depth at which 99% of the alignment columns are covered.
# Walks the histogram keys in ascending numeric order, accumulating counts
# until they reach 0.99 * $max ($max is the file-level global holding the
# total column count, set by read_file()).  Returns -1 if never reached.
sub calc_n99 {
	my $hash_ref = shift;
	my %hash = %{$hash_ref};
	my $sofar = 0;
	my $n99 = -1;
	# FIX: the keys are numbers, but plain sort() compares lexically (e.g.
	# "10" before "2"), so the accumulation walked depths out of order and
	# returned the wrong depth.  Sort numerically instead.
	foreach my $key (sort { $a <=> $b } keys(%hash)) {
		$sofar += $hash{$key};
		if ($sofar >= 0.99*$max) {
			$n99 = $key;
			last;
		}
	}
	return $n99;
}

# Total count of histogram entries at depth >= $cutoff.  Steps down one unit
# at a time from the largest key ($cutoff may be fractional, e.g.
# $org_count/4, so the walk stops at the first value below it).
sub solomon_size {
	my ($cutoff, $href) = @_;
	my %counts = %{$href};
	my $total = 0;
	my $depth = &max_key($href);
	while ($depth >= $cutoff) {
		$total += $counts{$depth};
		$depth--;
	}
	return $total;
}

#Need to think about this better
sub cross_over {
	my ($r1,$r2) = @_;
	my %h1 = %{$r1};
	my %h2 = %{$r2};
	my $max1 = &max_key($r1);
	my $max2 = &max_key($r2);
	my $pos_before_crossover = 0;
	for(my $i=max($max1,$max2);$i>=0;$i--) {
		my $oneval = 0;
		my $twoval = 0;
		if (exists($h1{$i})) {
			$oneval = $h1{$i};
		}
		if (exists($h2{$i})) {
			$twoval = $h2{$i};
		}
		if ($oneval >= $twoval) {
			$pos_before_crossover++;
		} else {
			last;
		}
	}
	return $pos_before_crossover;
}

#Need to think about this better
sub ever_better {
	my ($r1,$r2) = @_;
	my %h1 = %{$r1};
	my %h2 = %{$r2};
	my $max1 = &max_key($r1);
	my $max2 = &max_key($r2);
	my $greater_positions = 0;
	$onesum = 0;
	$twosum = 0;
	for(my $i=max($max1,$max2);$i>=0;$i--) {
		if (exists($h1{$i})) {
			$onesum = $h1{$i};
		}
		if (exists($h2{$i})) {
			$twosum = $h2{$i};
		}
		if ($onesum >= $twosum) {
			#Weight things linearly by position (we care more about a right skew if it happens with higher numbers)
			$greater_positions += $i;
		}
	}
	return $greater_positions;
}

#Comparison function
sub compare_hists {
	my ($ctype,$r1,$r2) = @_;
	my $s1 = 0;
	my $s2 = 0;
	switch($ctype) {
		case /n99/ {
			$s1 = &calc_n99($r1);
			$s2 = &calc_n99($r2);
		}
		case /crossover/ {
			$s1 = &cross_over($r1,$r2);
			$s2 = &cross_over($r2,$r1);
		}
		case /skew/ {
			$s1 = &calc_skew($r1);
			$s2 = &calc_skew($r2);
			if ($s1 == -1 || $s2 == -1) {
				return 0;
			}
		}
		case /kurtosis/ {
			#Not sure whether it is high kurtosis or low kurtosis that we are looking for
			$s1 = &calc_kurtosis($r1);
			$s2 = &calc_kurtosis($r2);
		}
		case /everbetter/ {
			#are the sums of the depth-weighted positions where $s1 has more positions of greater depth than $s2 greater than the same sums for $s2 with respect to $s1
			$s1 = &ever_better($r1,$r2);
			$s2 = &ever_better($r2,$r1);
		}
		case /solomon/ {
			$s1 = &solomon_size($org_count/4,$r1);
			$s2 = &solomon_size($org_count/4,$r2);
		}
	}

	if ($s1 > $s2) {
		return 1;
	} elsif ($s1 == $s2) {
		return 0;
	} else {
		return -1;
	}
}

#Comparison function, but weight the histograms first
sub compare_hists_weighted {
	my ($ctype,$r1,$r2) = @_;
	my %h1 = %{$r1};
	my %h2 = %{$r2};
	my $max1 = &max_key($r1);
	my $max2 = &max_key($r2);
	for(my $i=max($max1,$max2);$i>=0;$i--) {
		if (exists($h1{$i})) {
			$h1{$i} = $h1{$i} * $i;
		}
		if (exists($h2{$i})) {
			$h2{$i} = $h2{$i} * $i;
		}
	}
	return &compare_hists($ctype,\%h1,\%h2);
}

# Render the size of the shuffle search space as a factorial expression:
# for each organism row, (#tokens)! divided by the product of the factorials
# of each distinct token's multiplicity (multinomial coefficient), with the
# per-row terms multiplied together.
sub search_size {
	my $matrix_ref = shift;
	my @terms;
	foreach my $row (@{$matrix_ref}) {
		my %tally;
		my @tokens = split(/\t/, $row);
		$tally{$_}++ foreach @tokens;
		my $denominator = join("!*", values(%tally));
		push(@terms, scalar(@tokens) . "!/(" . $denominator . "!)");
	}
	return "(" . join(")*(", @terms) . ")";
}

# Shuffle each organism's cluster tokens and re-expand them to full alignment
# width.  Takes a reference to the per-organism tab-joined token strings
# (@thing) and returns a list of array refs, one per organism, where each
# cluster of size v is followed by v-1 zeros and gaps are re-inserted at the
# columns recorded in the global @gaptrix.  Uses rand() via the shuffle, so
# results differ between calls.
sub shuffle_matrix {
	my $ref = shift;
	my @thing = @{$ref};
	my @newthing = ();

	my $k=0;
	foreach my $line (@thing) {
		my @splitline = split(/\t/, $line);
		#Do a Fisher-Yates/Knuth shuffle on the adjusted array
		for(my $i=$#splitline;$i>0;$i--) {
			my $j = int(rand($i+1));
			my $tmp = $splitline[$j];
			$splitline[$j] = $splitline[$i];
			$splitline[$i] = $tmp;
		}

		#add the extra 0's and "-"s back in
		# $j tracks the output column; whenever @gaptrix marks the next
		# column(s) as gaps, emit "-" before placing the next token.
		my @newsplit = ();
		my $j=0;
		foreach my $val (@splitline) {
			#gaptrix is global
			while($gaptrix[$k][$j] eq "-") {
				push(@newsplit, "-");
				$j++;
			}
			push(@newsplit, $val);
			$j++;
			# A cluster of size $val occupies $val-1 further non-gap
			# columns, filled with explicit zeros.
			for(my $i=0;$i<($val-1);$i++) {
				while ($gaptrix[$k][$j] eq "-") {
					push(@newsplit, "-");
					$j++;
				}
				push(@newsplit, 0);
				$j++;
			}
		}
		$newthing[$k] = [ @newsplit ];
		$k++;
	}
	return @newthing;
}

#Read the file
&read_file();
# Number of organism rows in the alignment.
$org_count = scalar(@thing);

#Calculate search space
#print STDERR "Search space is: " . &search_size(\@thing) . "\n";

#my $fails = 0;
# %f_hash counts, per measure, how many shuffles scored at least as well as
# the original alignment (the numerator of an empirical p-value).
my %f_hash;
if ($comparison_method eq "all") {
	%f_hash = ("n99" => 0, "crossover" => 0, "skew" => 0, "kurtosis" => 0, "everbetter" => 0, "solomon" => 0);
} else {
	%f_hash = ("$comparison_method" => 0);
}

#Optionally, make all histograms cumulative
if($cumulative eq 'a') {
	%first_hist = &make_cumulative(\%first_hist);
}

# Remember the requested shuffle count before the loop below consumes $count.
my $tot = $count;
# NOTE(review): this bare statement is a no-op; %agg_hist is an undeclared
# package global (it would need `my`/`our` under strictures).  It collects,
# per overlap depth, the list of per-shuffle histogram counts.
%agg_hist;
my @startkeys = keys(%f_hash);
# Main loop: each iteration shuffles the matrix, scores every column with the
# same margin-weighted overlap rule used in read_file(), aggregates the
# resulting histogram, and updates the per-measure counts in %f_hash.
for($count;$count>0;$count--) {
	#Shuffle the file
	@newthing = &shuffle_matrix(\@thing);

	#Quantify the shuffle
	my @buffer = ();
	my @holds = ();
	my %ovl_hist;
	my @ungappos;
	for(my $i=0;$i<$max;$i++) {
		my $ovls = 0;
		for(my $j=0;$j<=$#thing;$j++) {
			if($newthing[$j][$i] eq "0") {
				$ungappos[$j]++;
				if($holds[$j] > 0) {
					$holds[$j] --;
					#If we have gotten to the end of the sequence, start using reduced weights
#					if($ungaplen[$j] - $ungappos[$i] > $margin_size) {
					#If we are still within margin bases of the sequence start, use reduced weights
					if($ungappos[$j] > $margin_size) {
						$ovls++;
					} else {
						$ovls+=1*$margin_weight;
					}

				} 
			# NOTE(review): numeric == coerces "-" to 0; this only works
			# because genuine zeros were consumed by the `eq "0"` branch
			# above and cluster sizes here are non-zero.
			} elsif ($newthing[$j][$i] == "-") {
				
			} else {
				# Start of a cluster: hold its remaining positions.
				$ungappos[$j]++;
				if($holds[$j] != 0) {
					print STDERR "Something went terribly wrong and we have a cluster starting in a cluster\n";
				} else {
					$holds[$j] = $newthing[$j][$i] - 1;
					#If we have gotten to the end of the sequence, start using reduced weights
#					if($ungaplen[$j] - $ungappos[$i] > $margin_size) {
					#If we are still within margin bases of the sequence start, use reduced weights
					if($ungappos[$j] > $margin_size) {
						$ovls++;
					} else {
						$ovls+=1*$margin_weight;
					}
				}
			}
		}
		$ovl_hist{$ovls}++;
	}

	#Optionally, make all histograms cumulative
	if($cumulative eq 'a') {
		%ovl_hist = &make_cumulative(\%ovl_hist);
	}

	# Record this shuffle's per-depth counts for the box plots printed later.
	while (my ($key,$val) = each(%ovl_hist)) {
		push(@{$agg_hist{$key}}, $val);
	}

	# For every base measure, count shuffles that score at least as well as
	# the original (plain, depth-weighted, and — with -y — cumulative forms).
	foreach my $cm (@startkeys) {
		if (&compare_hists($cm,\%first_hist,\%ovl_hist) <= 0) {
			$f_hash{$cm}++;
		}
		my $weighted = $cm . "_weighted";
		if (!(exists($f_hash{$weighted}))) {
			$f_hash{$weighted} = 0;
		}
		if(&compare_hists_weighted($cm,\%first_hist,\%ovl_hist) <= 0) {
			$f_hash{$weighted}++;
		}
		#Optionally calculate statistics on cumulative histograms as well
		if($cumulative eq 'y') {
			my %first_cum = &make_cumulative(\%first_hist);
			my %cur_cum = &make_cumulative(\%ovl_hist);
			my $cm_cum = $cm . "_cumulative";
			if (!(exists($f_hash{$cm_cum}))) {
				$f_hash{$cm_cum} = 0;
			}
			if (&compare_hists($cm,\%first_cum,\%cur_cum) <= 0) {
				$f_hash{$cm_cum}++;
			}
			my $cm_cum_weighted = $cm . "_cumulative_weighted";
			if (!(exists($f_hash{$cm_cum_weighted}))) {
				$f_hash{$cm_cum_weighted} = 0;
			}
			if (&compare_hists_weighted($cm,\%first_cum,\%cur_cum) <= 0) {
				$f_hash{$cm_cum_weighted}++;
			}
		}
	}
}

# Report, per measure, the fraction of shuffles that scored at least as well
# as the original alignment (an empirical p-value), then dump the original
# overlap histogram and a five-number summary of the shuffled counts at each
# depth.  Measure names sort lexically, matching the header order.
my @list;
foreach my $cm (sort(keys(%f_hash))) {
	push (@list, $f_hash{$cm}/$tot);
}
my $header = join("\t", "cluster", sort(keys(%f_hash)));
my $answers = join("\t", @list);
print "$header\n";
print "$fname\t$answers\n";
print "real hist:\n";
foreach my $key (sort({$a <=> $b} keys(%first_hist))) {
	my $val = $first_hist{$key};
	print "$key\t$val\n";
}

print "mean boxes:\n";
foreach my $key (sort({$a <=> $b} keys(%agg_hist))) {
	if(scalar(@{$agg_hist{$key}}) >= 1) {
		print join("\t", $key, &box_plot(@{$agg_hist{$key}})) . "\n";
	}
}
