######################################################################
# Time-stamp: <05/08/09 16:15:50 ostolop>
#
# Package: EP::Core::Data::Parser::exp
#
# Expression Data Parser Classes
######################################################################
package EP::Core::Data::Parser::exp::user;

use strict;
use base qw ( EP::Core::Data::Parser );

use EP::Config;
use EP::Common;
use EP::Common::General;

use PDL;
use PDL::NiceSlice;
use PDL::IO::FlexRaw;
use PDL::IO::Misc;

use File::Temp;
use File::Spec;
use Tie::IxHash;

# sub: parse_data
# Processes the uploaded data and creates a corresponding entry in the database.
#
# Reads the upload via upload_data(), rejects empty uploads, parses the file
# with _process_uploaded_data(), then creates an EP::Common::DBI::Dataset row
# and writes the parsed matrix/annotations to disk inside a single database
# transaction (a die inside the closure rolls the transaction back).
#
# returns: the newly created dataset object; dies on parse or write failure.
sub
parse_data
    {
    my $self = shift;

    my $data = $self->upload_data();

    warn "Uploaded file name is: $data->{data}" if DEBUG > 1;
    warn "Uploaded data size is: ", -s "$data->{data}" if DEBUG > 1;

    my $cur_folder = $self->{epc}->{session}->current_folder;
    # NOTE(review): $new_filename is computed here but never used in this sub;
    # also, mktemp is presumably exported by EP::Common::General or File::Temp's
    # :mktemp tag (plain "use File::Temp" does not export it) -- TODO confirm.
    my $new_filename = mktemp ( File::Spec->catfile ( $EP::Config::h->{EPNG_USRDATA_PATH}, $cur_folder->location, "data.$$.XXXXX" ) );

    # reject zero-byte uploads outright
    throw EP::Common::Error ( -value => CUD_NO_DATA_SUPPLIED ) if -z "$data->{data}";

    if ( $self->_process_uploaded_data( $data ) )
        {
        my ($dataset) = EP::Common::DBI->do_transaction(
            sub {
                my ($self) = @_;

                # dataset name is truncated to 72 chars and timestamped;
                # the query params are stringified with . "" to avoid storing undef
                my $dataset = EP::Common::DBI::Dataset->create( { name        => substr($data->{dataset_name}, 0, 72) . " : ". localtime,
                                                                  description => $self->{query}->param ( "dataset_description" ) . "",
                                                                  type        => "exp",
                                                                  species     => $self->{query}->param ( "dataset_species" ) . "",
                                                                  folder      => $cur_folder->id,
                                                                  ep_session  => $self->{epc}->{current_user}->session_only ? $self->{epc}->{session}->id : undef,
                                                                  owner       => $self->{epc}->{current_user},
                                                                  group_write => 0,
                                                                  group_read  => 1,
                                                                  other_read  => 0
                                                                } );

                # dying here aborts the transaction so no orphan dataset row remains
                $self->_write_dataset_data( $dataset, $data ) or die "Failed to write the dataset data.";
                return $dataset;
                }
            , $self
        );

        return $dataset;
        }

    die "Failed to process uploaded data.";
    }

# sub: _process_uploaded_data
# Processes uploaded data: normalizes line endings, identifies the file format
# (delimiter), reads the data matrix and the row/column annotations into the
# $data hash, and removes the temporary upload files.
#
# parameters:
# data - upload hash; {data}, {row_annot}, {column_annot} hold temp file paths
#
# populates:
# $data->{filetype} - delimiter description returned by _identify_file
# $data->{matrix}   - PDL piddle with the numeric data
# $data->{row}/{column} - per-index {full_annot} Tie::IxHash of annotations
#
# returns: OK on success; throws CUD_CANNOT_IDENTIFY_FILE when no delimiter
# can be determined.
sub
_process_uploaded_data
    {
    my $self = shift;
    my ( $data ) = @_;

    my $query = $self->{query};
    # normalize DOS (CRLF) line endings in place, based on file(1) output.
    # NOTE(review): the filename is interpolated into a shell command -- safe
    # only as long as it is a server-generated temp path; confirm upstream.
    system ( 'perl -pi -e \'s/\r\n$/\n/\' ' . $data->{data} ) if `file $data->{data}` =~ /CRLF line terminators/;
    # Mac users will appreciate that one :) (classic Mac CR-only line endings)
    system ( 'perl -pi -e \'s/\r/\n/g\' ' . $data->{data} ) if `file $data->{data}` =~ /CR line terminators/;

    # see what format the file has
    my ($delim_desc, $delim) = $self->_identify_file( $data, $query );
    throw EP::Common::Error ( -value => CUD_CANNOT_IDENTIFY_FILE, -line => __LINE__, -file => __FILE__ ) if not $delim;

    $data->{filetype}  = $delim_desc;

    # for older queries that might still be sending nr_annot_columns/rows:
    # the newer 'm'/'n' params encode the annotation counts offset by 2
    $query->param( 'm', $query->param('nr_annot_columns') + 2 ) if defined $query->param('nr_annot_columns');
    $query->param( 'n', $query->param('nr_annot_rows') + 2 )    if defined $query->param('nr_annot_rows');

    my $nr_annot_columns = defined $query->param('m') ? $query->param('m') - 2 : 0;
    my $nr_annot_rows    = defined $query->param('n') ? $query->param('n') - 2 : 0;

    # take the first line of the file and count delimiter occurrences in it:
    # the total number of columns is that count plus one
    chomp (my $col_header = `sed 1q "$data->{data}"`);
    $col_header =~ s/\s*$//;    # chop off trailing whitespace
    my $total_columns  = () = $col_header =~ /$delim/g;

    my $pdl_read_data = $self->_pdl_read_data( file             => $data->{data},
                                               cols             => $total_columns + 1,
                                               nr_annot_rows    => $nr_annot_rows,
                                               nr_annot_columns => $nr_annot_columns,
                                               delim            => $delim );

    $data->{matrix} = $pdl_read_data->{pdl};

    # seed each column's ordered annotation hash with its name under the id key
    foreach my $col ( 0 .. $#{$pdl_read_data->{col_names}} )
        {
        chomp ( my $col_name = $pdl_read_data->{col_names}[$col] );
        $data->{column}{$col}->{full_annot} = new Tie::IxHash if not exists $data->{column}{$col}->{full_annot};
        $data->{column}{$col}->{full_annot}->Push( $pdl_read_data->{col_id_name}, $col_name );
        }

    # then append every column annotation, preserving insertion order
    foreach my $col_annot_nr ( 0 .. $#{$pdl_read_data->{col_annot_names}} )
        {
        foreach my $col ( 0 .. $#{$pdl_read_data->{col_names}} )
            {
            my $annot_name = $pdl_read_data->{col_annot_names}[$col_annot_nr];
            chomp ( my $annot = $pdl_read_data->{col_annots}[$col][$col_annot_nr] );
            $data->{column}{$col}->{full_annot}->Push( $annot_name, $annot );
            }
        }

    # same two passes for the rows: names first, then annotations
    foreach my $row ( 0 .. $#{$pdl_read_data->{row_names}} )
        {
        chomp ( my $row_name = $pdl_read_data->{row_names}[$row] );
        $data->{row}{$row}->{full_annot} = new Tie::IxHash if not exists $data->{row}{$row}->{full_annot};
        $data->{row}{$row}->{full_annot}->Push( $pdl_read_data->{row_id_name}, $row_name );
        }

    foreach my $row_annot_nr ( 0 .. $#{$pdl_read_data->{row_annot_names}} )
        {
        foreach my $row ( 0 .. $#{$pdl_read_data->{row_names}} )
            {
            my $annot_name = $pdl_read_data->{row_annot_names}[$row_annot_nr];
            chomp ( my $annot = $pdl_read_data->{row_annots}[$row_annot_nr][$row] );
            $data->{row}{$row}->{full_annot}->Push( $annot_name, $annot );
            }
        }

    warn "Uploaded ". $pdl_read_data->{pdl}->info() if DEBUG;

    # clean up the temporary upload files -- unless they came from a public
    # database, in which case they are not ours to delete
    unlink $data->{data} unless $query->param("src_pubdb");
    unlink $data->{row_annot} unless $query->param("src_pubdb");
    unlink $data->{column_annot} unless $query->param( "src_pubdb");

    return OK;
    }

# sub: _pdl_read_data
# Uses <PDL> to read in the data from a delimited flat file.
#
# parameters:
# file             - file containing the data (filename)
# cols             - total number of columns in the file
# nr_annot_rows    - number of rows (after 1st) to use for annotation;
#                    negative means "file has no header row at all"
# nr_annot_columns - number of cols (after 1st) to use for annotation;
#                    negative means "file has no id column at all"
# delim            - data delimiter (can be a regexp)
#
# returns:
# A hash reference, containing the following:
# pdl         - data matrix piddle,
# row_id_name - name of column containing row ids
# col_id_name - name of column containing column ids; if not supplied, returns 'column_id'
# row_annots  - ref to array of row annotations
# row_annot_names - ref to array of row annotation names
# col_annots  - ref to array of column annotations
# col_annot_names - ref to array of column annotation names
# row_names   - ref to array of row names
# col_names   - ref to array of column names
sub
_pdl_read_data
    {
    my $self = shift;
    my %params = @_;

    # lexical three-arg open instead of the original bareword 2-arg
    # "open DATA, ..." -- the bareword DATA clobbers the special __DATA__ handle
    open my $fh, '<', $params{file} or die "Can't open $params{file} for reading.";

    my @first_row;

    if ( $params{nr_annot_rows} < 0 )
        {
        # no header row in the file: synthesize column names "C<n>"
        if ( $params{nr_annot_columns} >= 0 )
            {
            my $dcols = $params{cols} - $params{nr_annot_columns} - 1;
            @first_row = map {"C$_"} $params{nr_annot_columns} .. $dcols;
            }
        else
            {
            @first_row = map {"C$_"} 1 .. $params{cols};
            }

        $params{nr_annot_rows} = -1;    # normalize any negative value to exactly -1
        }
    else
        {
        # the first line of the file holds the column ids / annotation names
        my $row1 = <$fh>;
        $row1 =~ s/\s*$//; # chop off trailing whitespace
        @first_row = split /$params{delim}/, $row1;
        }

    if ( $params{nr_annot_columns} < 0 )
        {
        $params{nr_annot_columns} = -1; # normalize any negative value to exactly -1
        }

    # the top-left cell may hold "row_id\column_id"; without headers fall back
    # to defaults.  (The original used `my @x = ... if COND` followed by a
    # second conditional assignment -- a conditional `my` is documented as
    # undefined behaviour in perlsyn, so it is rewritten as a single ternary.)
    my @row_col_id_names = ( $params{nr_annot_rows} >= 0 and $params{nr_annot_columns} >= 0 )
                           ? split( /\\/, $first_row[0] )
                           : qw ( row_id column_id );

    # row-annotation names only exist when there are annotation columns
    # (again rewritten to avoid the undefined `my ... if` construct)
    my @row_annot_names;
    @row_annot_names = @first_row[1 .. $params{nr_annot_columns}] if $params{nr_annot_columns} >= 0;
    my @col_names = @first_row[$params{nr_annot_columns} + 1 .. $#first_row];

    my @col_annots;
    my @col_annot_names;

    # the next nr_annot_rows lines hold per-column annotations: the first field
    # is the annotation name, the fields past the annotation columns the values
    while ( scalar @col_annot_names < $params{nr_annot_rows} )
        {
        chomp (my $row = <$fh>);
        my @row_data = split /$params{delim}/, $row;
        push @col_annot_names, $row_data[0];
        push @{$col_annots[$_ - $params{nr_annot_columns} - 1]}, $row_data[$_] foreach $params{nr_annot_columns} + 1 .. $#row_data;
        }

    my @row_names;
    my @row_annots;
    my @D;    # numeric matrix, one arrayref per row
    my @Z;    # per-cell mask: 1 where the input cell was a literal zero

    while ( my $line = <$fh> )
        {
        chomp $line;
        # limit the split to $params{cols} fields so we never load more data than we have headers for
        my @row_data = split /$params{delim}/, $line, $params{cols};

        # accept decimal commas in data values
        # NOTE(review): only the first comma per cell is replaced (no /g) -- kept as-is
        foreach ( $params{nr_annot_columns} + 1 .. $#row_data )
            {
            $row_data[$_] =~ s/,/./;
            }

        # row name comes from the id column, or is synthesized as "R<n>" when there is none
        push @row_names, $row_data[0] if $params{nr_annot_columns} >= 0;
        push @row_names, "R" . (scalar @row_names + 1)  if $params{nr_annot_columns} < 0;

        push @{$row_annots[$_ - 1]}, $row_data[$_] foreach 1 .. $params{nr_annot_columns};
        push @D, [ @row_data[ $params{nr_annot_columns} + 1 .. $#row_data ] ];

        # remember which cells hold a genuine zero ("0", "0.0", ".0", ...) so
        # they can be restored after setvaltobad(0) marks all zeros as bad below
        my @zero_data;
        foreach ( $params{nr_annot_columns} + 1 .. $#row_data )
            {
            push @zero_data, ( $row_data[$_] =~ /^0?\.?0*$/o ? 1 : 0 );
            }
        push @Z, [ @zero_data ];
        }

    close $fh;    # the original leaked the filehandle

    local ($^W = 0);    # silence warnings while pdl() numifies non-numeric strings
    my $d = pdl @D;
    my $z = pdl @Z;
    # zero doubles as the missing-value marker: mark every zero bad, then
    # restore the cells that really contained zero in the input file
    $d->inplace->setvaltobad(0);
    my $c = $d->flat->index(which($z==1)); $c .= 0;

    die "There was a problem parsing the data file -- did you set the correct delimiter type? If all else fails please contact Expression Profiler developers to report a bug. If possible, please include a sample of your data with your report.\n"
        if $d->dim(0) != scalar @col_names or $d->dim(1) != scalar @row_names or $d->dim(1) <= 0 or $d->dim(0) <= 0;

    return
        { pdl => $d
        , row_id_name => $row_col_id_names[0]
        , col_id_name => $row_col_id_names[1] || "column_id"
        , row_annots => \@row_annots
        , row_annot_names => \@row_annot_names
        , col_annots => \@col_annots
        , col_annot_names => \@col_annot_names
        , row_names  => \@row_names
        , col_names  => \@col_names
        };
    }

# sub: _write_dataset_data
# Writes the data files for an expression dataset to the disk. It uses hash
# keys for row and column indexes.
#
# parameters:
# dataset - dataset object providing folder->location and filename
# data    - parsed upload hash: {matrix} piddle plus {row}/{column} annotations
#
# Produces four files under the dataset folder:
# <name>.rows / <name>.columns        - tab-separated annotation tables
# <name>.bin / <name>.transpose.bin   - FlexRaw binary matrix and its transpose
#
# returns: 1 on success; dies if any file cannot be opened or closed.
sub
_write_dataset_data
    {
    my $self = shift;
    my ( $dataset, $data ) = @_;
    my %seen;
    my $row = 0;

    my $folder   = $EP::Config::h->{EPNG_USRDATA_PATH} . "/" . $dataset->folder->location;
    my $filename = $dataset->filename;

    # lexical three-arg opens instead of the original bareword 2-arg ones
    open my $columns_fh, '>', "$folder/$filename.columns" or die "Couldn't open $folder/$filename.columns, $!";
    open my $rows_fh,    '>', "$folder/$filename.rows"    or die "Couldn't open $folder/$filename.rows, $!";

    # TODO: map user-specified headers to common ids here in the future
    # (the original declared an unused $mapping placeholder for this)

    # header lines: "ID" followed by the ordered annotation names, taken from
    # an arbitrary entry (all entries share the same Tie::IxHash key order)
    my @row_keys = keys %{$data->{row}};
    my @row_annot_names = $data->{row}{$row_keys[0]}{full_annot}->Keys;
    print {$rows_fh} "ID\t", join ( "\t", @row_annot_names ), "\n";

    my @col_keys = keys %{$data->{column}};
    my @col_annot_names = $data->{column}{$col_keys[0]}{full_annot}->Keys;
    print {$columns_fh} "ID\t", join ( "\t", @col_annot_names ), "\n";

    foreach my $row_id ( sort { $a <=> $b } keys %{ $data->{row} } )
        {
        my $col = 0;

        print {$rows_fh} "$row\t", join ( "\t", $data->{row}{$row_id}{full_annot}->Values ), "\n";

        # columns are written exactly once, during the first pass over the rows
        foreach my $col_id ( sort { $a <=> $b } keys %{ $data->{column} } )
            {
            if ( !$seen{$col_id}++ )
                {
                print {$columns_fh} "$col\t", join ( "\t", $data->{column}{$col_id}{full_annot}->Values ), "\n";
                }
            $col++;
            }
        $row++;
        }

    # buffered write errors only surface at close, so check both closes
    close $columns_fh or die "Couldn't close $folder/$filename.columns, $!";
    close $rows_fh    or die "Couldn't close $folder/$filename.rows, $!";

    # binary layout: row count (long), column count (long), float matrix
    my $hdr = writeflex
                ( "$folder/$filename.bin"
                , long ( $data->{matrix}->dim(1) )
                , long ( $data->{matrix}->dim(0) )
                , float ( $data->{matrix} )
                );

    writeflexhdr( "$folder/$filename.bin", $hdr );

    # and the transposed matrix, with the counts swapped accordingly
    $hdr = writeflex
                ( "$folder/$filename.transpose.bin"
                ,  long ( $data->{matrix}->dim(0) )
                ,  long ( $data->{matrix}->dim(1) )
                ,  float ( transpose ( $data->{matrix} ) )
                );
    writeflexhdr( "$folder/$filename.transpose.bin", $hdr );

    warn "Successfully written expression dataset $filename to folder $folder" if DEBUG;

    return 1;
    }

# sub: _read_dataset
# Reads in the expression data from the disk and loads it in memory for use
# with the Dataset object.
#
# parameters:
# dataset - dataset object; must provide folder->location and filename, and is
#           populated in place with {row}, {column}, {matrix}, {binary},
#           {row_count}, {column_count} and {stats}
#
# Note: this is a plain function, not a method -- it takes the dataset directly.
sub
_read_dataset
    {
    my ($dataset) = @_;

    my $folder   = $EP::Config::h->{EPNG_USRDATA_PATH} . "/" . $dataset->folder->location;
    my $filename = $dataset->filename;

    # bail out (without dying) when the dataset has no usable location
    if ( $folder =~ /^\s*$/ )
        {
        print STDERR "[EP_Core] No folder name defined\n";
        return;
        }
    if ( $filename =~ /^\s*$/ )
        {
        print STDERR "[EP_Core] No dataset filename defined\n";
        return;
        }

    # read the row identifiers and annotations
    open my $rows_fh, '<', "$folder/$filename.rows"
        or die "Couldn't open $filename.rows, $!\n";

    chomp (my $row_annot_header = <$rows_fh>);
    my @row_annot_names = split "\t", $row_annot_header;
    shift @row_annot_names; # shift because the first item there is "ID"

    while ( my $line = <$rows_fh> )
        {
        chomp $line;
        my ($row_id, @row_annot) = split /\t/, $line;

        # ordered annotation hash, keyed by the header names
        my $row_annotation = new Tie::IxHash;
        $row_annotation->Push ( $row_annot_names[$_] => $row_annot[$_] ) foreach 0 .. $#row_annot_names;

        # NOTE(review): "mappped_row_header" (triple p) is kept as-is -- other
        # code may read this misspelled key, so renaming it would break callers
        $dataset->{row}{$row_id} = { header => $row_id
                                   , mappped_row_header => ""
                                   , annot => \@row_annot
                                   , full_annot => $row_annotation
                                   };
        }
    close $rows_fh;

    # read the column identifiers and annotations
    open my $columns_fh, '<', "$folder/$filename.columns"
        or die "Couldn't open $filename.columns, $!\n";

    chomp (my $col_annot_header = <$columns_fh>);
    my @col_annot_names = split "\t", $col_annot_header;
    shift @col_annot_names; # shift because the first item there is "ID"

    while ( my $line = <$columns_fh> )
        {
        chomp $line;
        my ($col_id, @col_annot) = split /\t/, $line;

        my $col_annotation = new Tie::IxHash;
        $col_annotation->Push( $col_annot_names[$_] => $col_annot[$_] ) foreach 0 .. $#col_annot_names;

        $dataset->{column}{$col_id} = { header => $col_id
                                      , annot => \@col_annot
                                      , full_annot => $col_annotation
                                      };
        }
    close $columns_fh;

    # the binary file stores row count, column count, then the float matrix
    # (the layout written by _write_dataset_data)
    my ($rows, $cols);
    ( $rows, $cols, $dataset->{matrix} ) = readflex("$folder/$filename.bin");

    my @dims = $dataset->{matrix}->dims();
    die "Problem with reading in PDL matrix from $folder/$filename.bin" if $rows != $dims[1] or $cols != $dims[0];

    warn "Read " . $dataset->{matrix}->info() if DEBUG;
    # treat NaNs in the stored matrix as missing values
    $dataset->{matrix}->inplace->setnantobad;
    $dataset->{matrix}->badflag(1);

    # add some extra stats to the object
    $dataset->{binary} = "$filename.bin" if -f "$folder/$filename.bin";
    $dataset->{row_count} = scalar( keys %{ $dataset->{row} } );
    $dataset->{column_count} = scalar( keys %{ $dataset->{column} } );
    $dataset->{stats} = [$dataset->{matrix}->stats()];
    }

# sub: _identify_file
# Tries to recognize the type of data (i.e. determine delimiters).
#
# parameters:
# data  - upload hash; $data->{data} is the path of the main data file
# query - query object; reads the "filetype" and "cust_delim" parameters
#
# returns: a ( description, delimiter ) pair, or an empty list when
# auto-detection fails (the caller treats an undef delimiter as failure and
# throws CUD_CANNOT_IDENTIFY_FILE).
sub
_identify_file
    {
    my $self = shift;
    my ( $data, $query ) = @_;
    my $filetype = $query->param("filetype");

    # explicit Excel upload: convert it to tab-separated text first
    if ( $filetype eq "XLS" )
        {
        return ( "TAB", qr /\t/ ) if $self->_parse_excel_files( $data, $query );
        die "Not an Excel file!";
        }

    my $file_delim = { TAB => qr /\t/
                     , VWS => qr /\s+/
                     , SPACE => qr / /
                     , CUSTOM => $query->param ( "cust_delim" ) . ""   # plain string; interpolated into regexps by callers
                     };

    # if we are *given* the filetype, just go with it...
    # TODO: when I write a function to do -B with strings, I'll do a test here.
    # Now: if you upload an Excel file and want to use it as TAB-delimited, you get a mess...
    if ( not $filetype eq "AUTO" )
        {
        die "No delimiter provided for filetype $filetype!" if not exists $file_delim->{$filetype};
        return ( $filetype, $file_delim->{$filetype} );
        }

    # AUTO detection: try Excel first...
    if ( $self->_parse_excel_files( $data, $query ) )
        {
        warn "Automatically detected Excel" if DEBUG;
        return ( "TAB", qr /\t/ );
        }

    # read the first 50 lines of the data (dropping empty ones) once, in Perl,
    # instead of shelling out to "head -n 50 ... | grep -v '^$'" for every
    # candidate delimiter -- this removes the shell-injection risk of
    # interpolating the filename into a command and avoids re-reading the
    # file three times (the read was loop-invariant anyway)
    open my $fh, '<', $data->{data} or die "Couldn't open $data->{data}, $!";
    my @id_data;
    while ( my $line = <$fh> )
        {
        last if $. > 50;
        push @id_data, $line unless $line =~ /^$/;
        }
    close $fh;

    # usual text detection (NB: on the main data part only, assume delimiter same for others)
    foreach my $delim ( qw ( TAB VWS SPACE ) )
        {
        my $count = 0;

        # count how many adjacent line pairs contain the same (>1) number of delimiters
        foreach my $i ( 0 .. $#id_data - 1 )
            {
            my $nr = 0;
            my $nr2 = 0;
            while ( $id_data[$i]   =~ /$file_delim->{$delim}/g ) { $nr++ };
            while ( $id_data[$i+1] =~ /$file_delim->{$delim}/g ) { $nr2++ };

            $count++ if $nr == $nr2 and $nr > 1;
            }

        # every adjacent pair agreed => confidently identified
        if ( $count == $#id_data )
            {
            warn "Identified file as $delim-delimited" if DEBUG;
            return ( $delim, $file_delim->{$delim} );
            }
        }

    # nothing matched: return an empty list (the original fell off the end of
    # the sub with the same observable result)
    return;
    }

# sub: _parse_excel_files
# Takes the uploaded data parts, converts each through xls2csv into a
# tab-separated temp file, and replaces the original file path in $data with
# the converted one.
#
# parameters:
# data  - upload hash; {data}, {row_annot}, {column_annot} hold file paths
# query - query object (currently unused here)
#
# returns: 1 if every defined part converted cleanly, 0 on the first failure
# (e.g. the file was not actually an Excel file).
sub
_parse_excel_files
    {
    my $self = shift;
    my ( $data, $query ) = @_;

    foreach my $part ( "data", "row_annot", "column_annot" )
        {
        next if not defined $data->{$part};
        # UNLINK => 0: the converted file replaces the upload and must outlive this sub
        my $tmp = new File::Temp( DIR => $EP::Config::h->{TMP}, SUFFIX => ".txt", UNLINK => 0 );

        # run xls2csv with a literal tab as the column separator (-c'<TAB>');
        # NOTE(review): file paths are interpolated into a shell command --
        # safe only while they are server-generated temp paths; confirm upstream
        my $out = system_call_with_output( "cd $EP::Config::h->{BIN}; ./xls2csv -q1 -c'	' 2>&1 $data->{$part}>$tmp" );
        # failure is detected either from xls2csv's own output or from the
        # non-zero-exit error pushed onto the EP::Common::Error stack
        if ( $out =~ /Error/ or defined EP::Common::Error->prior() and EP::Common::Error->prior()->value() == SYS_CALL_WITH_NONZERO_STATUS )
            {
            warn "Found a problem when converting (possibly) Excel file $data->{$part}" if DEBUG;
            unlink $tmp;
            return 0;
            }

        warn "Replacing converted $data->{$part} with $tmp" if DEBUG;

        unlink $data->{$part};
        $data->{$part} = $tmp;
        }

    return 1;
    }

1;
