// Copyright (C) 2010  Andrew H. Chan
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// 
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

#include "compute.h"
#include "packed_config.h"

//typedef samp_config_t::data_type data_type;

using namespace std;

// Constructor
// Binds the parsed input (held by reference for the lifetime of this object)
// and zero-initializes all bookkeeping/statistics counters (vertex, edge,
// base-case, zero-coefficient, zero-probability, mutation-term counts and
// per-root averages). No computation happens here; see run().
// NOTE(review): member-initializer order must match the declaration order in
// compute.h — confirm against the header before reordering anything here.
Compute::Compute(InputParser& data) : data(data), num_vert_added(0), num_edge_added(0), num_base_cases(0), coeff_zero(0), roots_already_known(0), prev_seen_vertex(0), num_zero_probs(0), mut_terms(0), num_samp_added_for_root(0), num_edge_added_for_root(0), avg_samp_size_sum(0) { }

// Make sure that the input is the diallelic 3-locus case.
// Returns 0 when the input matches the compiled-in limits (NUM_ALLELES,
// NUM_LOCI), 1 otherwise.
int Compute::validate_input() {
    bool matches_build = (data.num_alleles == NUM_ALLELES)
                      && (data.num_loci == NUM_LOCI);
    return matches_build ? 0 : 1;
}

// Top-level driver: validates the input, builds the haplotype maps and the
// sequence of sample configurations, solves the recursion, and prints the
// probability of the target (input) configuration.
// Returns 0 on success; throws runtime_error if the input does not match the
// compiled-in locus/allele limits.
int Compute::run() {
    if(validate_input()) {
        stringstream error_msg;
        error_msg << "Compute::validate_input() error: The current build is compiled to handle only " << NUM_LOCI << " loci and " << NUM_ALLELES << " alleles.";
        error_msg << " The current input has " << data.num_loci << " loci and " << data.num_alleles << " alleles.";
        throw runtime_error(error_msg.str());
    }

    // renormalize PIM vectors
    data.renormalize_PIM();

    // Compute defined loci and breakpoints 
    // (static precomputation shared by all Packed_Config instances)
    Packed_Config::compute_loci();
    Packed_Config::compute_breaks();

    // Create hap hashes (hap_to_id / id_to_hap lookup tables)
    create_maps();

    // add target sample (the sample defined in the input file) to seq_config
    convert_input_to_config();
    
    // debug: convert_input_to_config() must have pushed the target config
    assert(!seq_config.empty());

    // Remember the target before get_seq_config() appends the heuristic
    // breakdown configs behind it.
    packed_config_t target_config = seq_config.front();

    // get sequence of configs to solve for
    get_seq_config();

    // initialize base cases (all single-individual configurations)
    init_probs();
    // Compute the probabilities for each sample configuration
    // Probabilities are stored in probs
    compute_probs();

    // display prob of target configuration
    cout << "Target Configuration:" << endl;
    display_samp_config(target_config);

    // Temporarily raise output precision for the probability, then restore.
    size_t old_precision = cout.precision();
    cout << setprecision(15);
    cout << "Probability of target configuration: " << probs[target_config] << endl;
    cout << setprecision(old_precision);

    return 0;
}

// Advance mask to the next subset of an n-element set, treating the mask as a
// little-endian binary counter. Returns 1 while a next subset exists, 0 once
// the counter wraps past the full set (all bits were true).
int Compute::next_subset(vector<bool>& mask, int n) {
    int pos = 0;
    while(pos < n && mask[pos]) {
        mask[pos] = false;   // carry: clear trailing run of true bits
        ++pos;
    }
    if(pos == n) {
        return 0;            // wrapped around — enumeration finished
    }
    mask[pos] = true;
    return 1;
}

// Advance conf (first n entries) to the next allele configuration, treating it
// as a mixed-radix counter with digits in [0, max_allele]. Returns 1 while a
// next configuration exists, 0 once the counter wraps back to all zeros.
int Compute::next_conf(index_t& conf, int max_allele, int n) {
    int pos = 0;
    while(pos < n && conf[pos] == max_allele) {
        conf[pos] = 0;       // carry: reset trailing maxed-out digits
        ++pos;
    }
    if(pos == n) {
        return 0;            // wrapped around — enumeration finished
    }
    ++conf[pos];
    return 1;
}

// Advance index to the next haplotype, counting each locus through the values
// -1 (unspecified), 0, ..., max_allele. max_allele should be
// (number of alleles - 1). Returns 1 while a next haplotype exists, 0 once
// the counter wraps back to the all -1 index.
int Compute::next_hap(index_t& index, int max_allele) {
    int pos = 0;
    int n = index.size();
    while(pos < n && index[pos] == max_allele) {
        index[pos] = -1;     // carry: wrap maxed-out loci back to unspecified
        ++pos;
    }
    if(pos == n) {
        return 0;            // wrapped around — enumeration finished
    }
    ++index[pos];
    return 1;
}

// initialize probabilities with base cases
int Compute::init_probs() {
    ///////////////////////////////////////////////////////////
    // NOTE: It might not be necessary to initialize the base cases all at once. Instead they can be initialized on the fly as needed.
    ///////////////////////////////////////////////////////////
    //
    // base cases: all configurations that have one individual
    vector<bool> mask(data.num_loci, false);
    // skip the empty set
    while(next_subset(mask, data.num_loci)) {
        // Figure out size of the subset
        size_t subset_size = 0;
        for(uint ii=0; ii<mask.size(); ++ii) {
            if(mask[ii])
                ++subset_size;
        }
        index_t subindex(subset_size, 0);
        // Generate all configurations for the defined loci.
        do {
            index_t index(data.num_loci, -1);

            //debug
            assert(mask.size() == data.num_loci);

            index_t::const_iterator sub_iter = subindex.begin();

            for(uint kk=0; kk<mask.size(); ++kk) {
                if(mask[kk]) {
                    // debug
                    assert(sub_iter != subindex.end());

                    index[kk] = *sub_iter;
                    ++sub_iter;
                }
            }
            ++num_base_cases;

            // create sample with one individual at index
            samp_config_t samp_config;
            samp_config[index] = 1;
            // base case prob is product of the alleles defined in that individual
            double base_case_prob = 1;
            for(uint pp = 0; pp < index.size(); ++pp) {
                if(index[pp] != -1) {
                    // pp is which locus the index refers to
                    // index[pp] is which allele the individual has
                    base_case_prob *= data.pim_vectors[pp][index[pp]];
                }
            }
            // assign base case prob to samp_config in probs 
            packed_config_t packed_config;
            convert_to_packed(samp_config, packed_config);
            probs[packed_config] = base_case_prob;
        } while(next_conf(subindex, data.num_alleles-1, subset_size));
    }

    return 0;
}

// convert input to configuration
int Compute::convert_input_to_config() {
    packed_config_t packed_config;
    for(individuals_t::const_iterator iter = data.individuals.begin();
            iter != data.individuals.end(); ++iter) {
        quantity_t quantity = iter->second; // compiler should produce warning because iter->second is int

        // construct index for hap
        // parser converts input sample to indexes and quantities
        index_t const &index = iter->first; 
        hap_id_t hap_id = hap_to_id[index]; 
        packed_config[hap_id] += quantity;
    }
    seq_config.push_back(packed_config);
    return 0;
}

int Compute::create_maps() {
    index_t index(data.num_loci, -1);
    hap_id_t hap_id = 0;
    // skip the all -1 index by calling next_hap right away
    while(next_hap(index, data.num_alleles-1)) {
        hap_to_id[index] = hap_id;
        id_to_hap[hap_id] = index;
        ++hap_id;
        assert(hap_id != 255);
    }
    return 0;
}

// Print a sample configuration: each haplotype index with its quantity,
// followed by the total (quantity-weighted) sample size.
int Compute::display_samp_config(samp_config_t const& samp_config) const {
    cout << "==start sample==" << endl;
    uint tot_samp_size = 0;
    samp_config_t::const_iterator entry = samp_config.begin();
    for( ; entry != samp_config.end(); ++entry) {
        cout << entry->first;                       // haplotype index
        cout << "qua: " << entry->second << endl;   // its quantity
        tot_samp_size += entry->second;
    }
    cout << "total sample size: " << tot_samp_size << endl;
    cout << "==end sample==" << endl;
    return 0;
}

// Overload for packed configurations: forwards to display_packed_config.
int Compute::display_samp_config(packed_config_t const& packed_config) const {
    return display_packed_config(packed_config);
}

// Unpack a packed configuration into a sample configuration and print it via
// the samp_config_t overload of display_samp_config.
int Compute::display_packed_config(packed_config_t const& packed_config) const {
    samp_config_t unpacked;
    convert_to_samp(packed_config, unpacked);
    display_samp_config(unpacked);
    return 0;
}

// get sequence of configurations to solve
// Note: Requires target config to be in seq_config. (i.e. seq_config cannot be empty.)
// This is a heuristic.
// Basically, I don't want to start searching from the target config directly, because then I'd explore the entire graph in one sweep.
// Instead, I break the target config down into smaller configs with a heuristic: repeatedly subtract 1 from the largest element of the
// config and append the resulting sample configuration to a list, stopping once a base case is reached.
// Then I start solving these configs starting from the end of the list (i.e. the smallest configs). 
// In the process of solving the configs, the graph is explored
int Compute::get_seq_config() {
    // Precondition: the target config must already be in seq_config.
    if(seq_config.empty()) {
        cout << "Error in get_seq_config: seq_config is empty." << endl;
        return 1;
    }

    packed_config_t cur_samp = seq_config.back(); 
    // while loop breaks when sample configuration is empty
    while(true) {
        // initialize to default just in case
        hap_id_t max_hap_id = 0;
        quantity_t max_elem = 0;
        // Iterate over all non-zero values in cur_samp and find max and its index
        // (ties resolve to the largest hap id because of the >= comparison)
        for(uint ii=0; ii<NUM_HAPS; ++ii) {
            if(cur_samp[ii] >= max_elem) {
                max_elem = cur_samp[ii];
                max_hap_id = ii; 
            }
        }

        // Subtract 1 from largest element
        // NOTE(review): if cur_samp were already all zeros, max_elem is 0 and
        // this decrement would take the quantity below zero (or wrap, if
        // quantity_t is unsigned) before the empty() check below. Callers
        // always start from a non-empty target, so this shouldn't trigger —
        // confirm quantity_t semantics in packed_config.h.
        --cur_samp[max_hap_id];

        // Check to see if the configuration is all zeros (it should be empty hash if that's the case).
        if(cur_samp.empty()) {
            break;
        }
        else {
            // Record the shrunken configuration; compute_probs() later walks
            // this list in reverse (smallest configs first).
            seq_config.push_back(cur_samp);
        }
    }
    return 0;
}

// Solve the recursion for every configuration in seq_config, smallest first.
// For each unsolved root: explore the recursion graph, decompose it into
// strongly connected components, order the SCCs topologically, and solve each
// SCC as a linear system. Solved probabilities accumulate in probs; graph and
// border_vert are reset between roots.
int Compute::compute_probs() {
    if(seq_config.empty()) {
        cout << "Error in compute_probs(): seq_config is empty." << endl;
        return 1;
    }
    // Go through seq_config in reverse order and solve each in turn
    for(seq_config_t::reverse_iterator rev_iter = seq_config.rbegin();
            rev_iter != seq_config.rend(); ++rev_iter) {
        // Only explore starting from a vertex if its probability hasn't been found yet
        // If its probability is found in the probs hash map, then continue to next config 
        if(probs.find(*rev_iter) != probs.end()) {
            ++roots_already_known;
            continue;
        }

        // debug
        cout << "Solving for" << endl;
        display_samp_config(*rev_iter);

        // debug
        cout << "Exploring graph..." << endl;

        // explore the graph starting from the current config
        // (populates graph and border_vert for this block)
        explore_graph(*rev_iter);

        // debug
        cout << "Done exploring graph\n" << endl;

        // debug
        cout << "Computing scc's" << endl;

        // obtain list of sccs
        vv_packed_config_t list_of_sccs; 

        compute_sccs(list_of_sccs);

        // debug
        cout << "Done computing scc's." << endl;

        // debug
        cout << "------" << endl;
        cout << "# scc's: " << list_of_sccs.size() << endl;
        cout << "Number of vertices: " << graph.size() << endl;
        cout << "------" << endl;
        // end debug

        // debug
        cout << "Ordering scc's topologically" << endl;

        // order sccs topologically
        scc_rank_t scc_rank;
        topological_sort_scc(list_of_sccs, scc_rank);

        // debug
        cout << "Done with ordering scc's topologically" << endl;
        //cout << "num of sccs: " << list_of_sccs.size() << endl;
        // end debug
 
        // debug
        cout << "Solving scc system" << endl;

        // solve the scc's in order
        for(scc_rank_t::iterator scc_rank_iter = scc_rank.begin();
                scc_rank_iter != scc_rank.end(); ++scc_rank_iter) {
            size_t index_of_scc_to_solve = scc_rank_iter->second;
            packed_vector_t &scc_to_solve = list_of_sccs[index_of_scc_to_solve]; 

            // b_vector is scc.size() x 1
            // A_matrix is scc.size() x scc.size() 
            // Initializing to all 0.0's is important!
            vector_t b_vector(scc_to_solve.size(), 0.0);
            matrix_t A_matrix(scc_to_solve.size(), scc_to_solve.size(), 0.0);
            // This maps a sample to its row in the system (row of A_matrix and the corresponding row of b_vector)

            packed_uint_hash_t packed_index_system;
            convert_scc_to_system(scc_to_solve, b_vector, A_matrix, packed_index_system);

            // -----
            // Measure sparsity of systems
            //uint nonzero = 0;
            //for(uint ii=0; ii<A_matrix.size1(); ++ii) {
            //    for(uint jj=0; jj<A_matrix.size2(); ++jj) {
            //        if(A_matrix(ii,jj) != 0) {
            //            ++nonzero;
            //        }  
            //    }
            //}
            //cout << "Sparsity" << endl;
            //cout << "non-zero elements: " << nonzero << endl;
            //cout << "matrix size: " << A_matrix.size1() * A_matrix.size2() << endl;
            //cout << "density (nz/n^2): " << static_cast<double>(nonzero) / (A_matrix.size1() * A_matrix.size2()) << endl;
            // -----

            // Solve the system, which consists of b_vector and A_matrix
            // The solution is stored in sol_vector
            vector_t sol_vector(scc_to_solve.size(), 0);
            system_solve(b_vector, A_matrix, sol_vector);

            // Now iterate through scc_to_solve and assign probabilities (solutions) to probs 
            for(packed_vector_t::iterator iter = scc_to_solve.begin();
                    iter != scc_to_solve.end(); ++iter) {
                uint row_index = packed_index_system[*iter];
                
                // debug
                // NOTE(review): unqualified isnan — presumably the C macro or
                // pulled in via using namespace std; confirm <cmath> inclusion.
                assert(!isnan(sol_vector[row_index]));

                // store solved probability in probs
                probs[*iter] = sol_vector[row_index];

                // debug
                // exact float comparison is intentional here: only counts
                // probabilities that came out identically zero
                if(probs[*iter] == 0.0) {
                    ++num_zero_probs;
                }
            }
        } // Done solving scc's in current block

        // debug
        cout << "Done solving scc's in current block" << endl;
        size_t old_precision = cout.precision();
        cout << setprecision(15);
        cout << "Probability of configuration: " << probs[*rev_iter] << endl;
        cout << setprecision(old_precision);
        // end debug

        // clear graph and clear border_vert
        // (they are rebuilt per root by explore_graph)
        graph.clear();
        border_vert.clear();
    }
    return 0;
}

// Explore recursion graph starting from samp_config.
// This is where most of the work is.
// Performs a BFS over the recursion: for each dequeued configuration it adds
// edges for every coalescent, mutation, and recombination event, pushing each
// newly discovered configuration via add_vertex. Edge coefficients are the
// recursion-equation terms divided by the common divisor Z.
int Compute::explore_graph(packed_config_t& packed_config) {
    // breadth-first-search queue
    packed_queue_t bfs_queue;
    // start BFS starting from packed_config
    bfs_queue.push(packed_config);

    // put the root in the graph
    // note: the graph is reset every time a config in the sequence is explored
    // Add root to graph
    graph[packed_config] = vec_out_edge_t();

    //////////
    // debug
    // reset average sample size for this root
    avg_samp_size_sum = 0.0;
    // reset num_samp_added_for_root
    num_samp_added_for_root = 0;
    // reset num_edge_added_for_root
    num_edge_added_for_root = 0;
    avg_samp_size_sum += packed_config.distinct_hap();
    ++num_vert_added;
    ++num_samp_added_for_root;
    // end debug
    //////////

    while(!bfs_queue.empty()) {
        packed_config_t cur_samp = bfs_queue.front();
        bfs_queue.pop();

        //////////////////////
        // Compute Z
        // Z is the divisor in the recursion equation
        // n*(n-1) + mut * V + rho * U
        // mut is the mutation rate (\theta)
        // rho is the recombination rate (\rho)
        // V is the total number of loci defined (weighted by quantities)
        // U is the total number of significant breakpoints (weighted by quantities)
        //

        double Z = 0;

        // Compute n (total number of individuals in the sample)
        double n = static_cast<double>(cur_samp.size());

        // Compute V
        double V = static_cast<double>(cur_samp.compute_V());

        // Compute U
        double U = static_cast<double>(cur_samp.compute_U());

        // Calculate Z
        Z += n*(n-1) + data.mut * V  + data.rho * U;

        // Done computing Z 
        //////////////////////
        
        //////////////////////////////////////////
        // Handle coalescent term
        //////////////////////////////////////////
        //
        //
        // Iterate through all indexes in the sample using degree list
        // Compute degree list
        // The degree list is a list of index-masks sorted according to their degrees
        // The degree of an index is the number of loci it defines (the number of entries that are not -1)
        // The mask is a vector of the loci that the haplotype defines
        // Iterate through indices in sample and calculate their degrees and masks
        degree_list_t deg_list;
        for(uint jj=0; jj<NUM_HAPS; ++jj) {
            hap_id_t const& cur_hap_id = static_cast<hap_id_t>(jj); 
            // skip haplotypes not present in this sample
            if(cur_samp[cur_hap_id] < 1)
                continue;

            index_t cur_index = id_to_hap[cur_hap_id];
            // compute cur index's degree
            // compute cur index's mask
            vi mask;
            size_t cur_index_degree = 0;
            for(uint ii = 0; ii < cur_index.size(); ++ii) {
                if(cur_index[ii] != -1) {
                    ++cur_index_degree;
                    mask.push_back(ii);
                }
            }
            // add cur index and its degree to deg_list
            hap_id_mask_pair_t hap_id_mask_pair(cur_hap_id, mask);
            deg_list.push_back(degree_pair_t(cur_index_degree, hap_id_mask_pair));
        }

        // sort deg_list (ascending by degree, so lower-degree haps come first)
        sort(deg_list.begin(), deg_list.end());

        // Compute coalescent terms 1 and 2
        for(degree_list_t::iterator outer_d_iter = deg_list.begin();
                outer_d_iter != deg_list.end(); ++outer_d_iter) {
            // current hap
            hap_id_t &cur_hap_id = outer_d_iter->second.first;
            index_t cur_index = id_to_hap[cur_hap_id];

            vi &cur_mask = outer_d_iter->second.second;

            size_t cur_degree = outer_d_iter->first;
            double cur_hap_quantity = static_cast<double>(cur_samp[cur_hap_id]);

            bool cur_hap_quantity_more_than_one = (cur_samp[cur_hap_id] > 1);

            //////////////////////////////////////
            // Compute coalescent term 1
            //////////////////////////////////////

            // Compute first term of coalescent term 1
            // This is where a haplotype merges with someone identical
            //
            double first_term = cur_hap_quantity * (cur_hap_quantity - 1);

            // Compute second term of coalescent term 1
            // This is where a haplotype merges into someone more specified
            //
            // Find first hap of greater degree starting from current hap (since the degree list is sorted)
            // This definition of "degree" differs from the use in Jenkins, Song (2009)
            // Their definition is a sample's V value (e.g. cur_samp.compute_V())
            // My definition is simply the number of loci defined by a hap
            // Notice that their definition applies to *samples*
            // Mine applies to haplotypes 
            //
            degree_list_t::iterator inner_d_iter;
            for(inner_d_iter = outer_d_iter; inner_d_iter != deg_list.end(); ++inner_d_iter) {
                // break when we find an index of higher degree than the current degree
                if(inner_d_iter->first > cur_degree)
                    break;
            }
            // Otherwise we've found an index of higher degree
            // Starting from there, check for compatible, more specified indexes
            vector<index_t> compatible_indexes;
            // reuse inner_d_iter since it points to the correct location in the degree list
            // This for loop will be skipped if there are no indexes of higher degree
            for( ; inner_d_iter != deg_list.end(); ++inner_d_iter) {
                hap_id_t &other_hap_id = inner_d_iter->second.first;
                index_t other_index = id_to_hap[other_hap_id];

                // Check for compatibility
                // other_index must be of higher degree at this point because of previous for loop
                bool compatible = true;
                for(uint ii = 0; ii < cur_index.size(); ++ii) {
                    if(cur_index[ii] != -1) {
                        if(cur_index[ii] != other_index[ii]) {
                            compatible = false;
                            break;
                        }
                    }
                }
                if(compatible) {
                    compatible_indexes.push_back(other_index);
                }
            }

            // Only add vertex if we know that the coefficient won't be zero
            // I use cur_hap_quantity_more_than_one instead of comparing directly with the inequality: cur_hap_quantity > 1, because cur_hap_quantity is a double and I want to minimize the reliance on floating-point comparisons
            if(cur_hap_quantity_more_than_one ||
                    !compatible_indexes.empty() ) {
                // For every index that was found to be compatible and more specified, add to running sum for the second term in coalescent term 1 
                double second_term_cum_sum = 0.0;
                for(vector<index_t>::iterator comp_iter = compatible_indexes.begin();
                        comp_iter != compatible_indexes.end(); ++comp_iter) {
                    index_t &comp_index = *comp_iter;
                    hap_id_t comp_id = hap_to_id[comp_index];
                    double comp_hap_quantity = static_cast<double>(cur_samp[comp_id]);
                    second_term_cum_sum += cur_hap_quantity * comp_hap_quantity;
                }
                // Only add vertex to graph if the coefficient is greater than 0
                // Basically, if coefficient is equal to 0, either
                // we have numerical inaccuracy
                // or there was no merging with a more specified haplotype to be done
                //
                // Compute coefficient for vertex
                double second_term = 2.0 * second_term_cum_sum;
                double coefficient = (first_term + second_term) / Z ;

                // Construct recursive samp
                packed_config_t recursive_samp = cur_samp;
                // The haplotype (cur_hap) that merges with someone more specified disappears
                --recursive_samp[cur_hap_id];

                // Add recursive samp to graph along with coefficient
                add_vertex(cur_samp, recursive_samp, coefficient, bfs_queue);
            }

            //////////////////////////////////////
            // Compute coalescent term 2
            //////////////////////////////////////
            // This is where a haplotype merges with a compatible haplotype that is not *strictly* more specified
            //
            // Start from immediately subsequent haplotype and check all later ones in the degree list for merge-compatibility
            for(degree_list_t::iterator inner_d_iter2 = outer_d_iter + 1; inner_d_iter2 != deg_list.end(); ++inner_d_iter2) {

                hap_id_t &other_hap_id = inner_d_iter2->second.first;
                index_t other_index = id_to_hap[other_hap_id]; 
                vector<index_t> merge_indexes;
                // mask_iter goes through each locus defined for the index
                // Check for compatibility for merge
                // To merge, the other index must not only be compatible with the current index, but it must not be *strictly* more specified. i.e. the current index must specify a locus that the other index does not
                bool compatible = true;
                bool cur_specifies_more = false;
                // Check for consistency and incomparability (i.e. current index specifies at least one locus that the other does not, and vice versa)
                for(vi_iter mask_iter = cur_mask.begin(); mask_iter != cur_mask.end(); ++mask_iter) {
                    if( other_index[*mask_iter] != -1
                            && cur_index[*mask_iter] != other_index[*mask_iter] ) {
                        compatible = false;
                        break;
                    }
                    if( other_index[*mask_iter] == -1 ) {
                        cur_specifies_more = true;
                    }
                }
                // Found a merge-compatible index
                // Compute the coefficient and add the vertex
                // Only add if compatible and cur_index specifies more
                if(compatible && cur_specifies_more) {
                    double merge_hap_quantity = static_cast<double>(cur_samp[other_hap_id]);
                    double coefficient = (2.0 * cur_hap_quantity * merge_hap_quantity) / Z;

                    // Create recursive sample and add to graph
                    // The haplotypes (cur_hap and other_hap) that merge disappear and form a haplotype that is more specified
                    // Create the more specified hap
                    index_t new_index(data.num_loci, -1);
                    for(uint ii=0; ii<new_index.size(); ++ii) {
                        if(cur_index[ii] != -1) {
                            new_index[ii] = cur_index[ii];
                        }
                        if(other_index[ii] != -1) {
                            new_index[ii] = other_index[ii];
                        }
                        // debug
                        // Make sure they really are consistent
                        if(cur_index[ii] != -1 && other_index[ii] != -1) {
                            assert(cur_index[ii] == other_index[ii]);
                        }
                        // end debug
                    }

                    hap_id_t new_hap_id = hap_to_id[new_index];

                    // Create recursive samp and add to graph
                    packed_config_t recursive_samp = cur_samp;
                    --recursive_samp[cur_hap_id];
                    --recursive_samp[other_hap_id];
                    ++recursive_samp[new_hap_id];

                    // Add recursive samp to graph along with coefficient
                    add_vertex(cur_samp, recursive_samp, coefficient, bfs_queue);
                } // End add merge-compatible index
                // else not merge-compatible, so move on to next index
            } // end for loop that checks for merge-compatibility

        } // end going through degree list for the coalescent terms
        ///////////////////////////////////
        // End coalescent term
        ///////////////////////////////////

        ///////////////////////////////////
        // Handle mutation term
        ///////////////////////////////////
        //
        // Iterate through all indices of sample
        for(uint jj=0; jj<NUM_HAPS; ++jj) { 
            hap_id_t const& cur_hap_id = static_cast<hap_id_t>(jj);
            if(cur_samp[cur_hap_id] < 1) {
                continue;
            }
            index_t cur_index = id_to_hap[cur_hap_id];
            double cur_hap_quantity = static_cast<double>(cur_samp[cur_hap_id]);

            // Find specified loci and mutate them
            for(uint ii=0; ii<cur_index.size(); ++ii) {
                // If undefined, continue to next locus
                if(cur_index[ii] == -1) { 
                    continue;
                }
                // Else mutate locus
                // Mutate allele at the specified locus (under PIM model it becomes unspecified)
                int allele_type = cur_index[ii];

                // Compute coefficient
                // pim_vector[locus][allele]
                double coefficient =
                        (data.mut * cur_hap_quantity * data.pim_vectors[ii][allele_type]) / Z;
                // Create mutated allele index
                // In PIM model, when a locus mutates, it becomes unspecified
                index_t mutated_index = cur_index;
                // Make mutated locus unspecified
                mutated_index[ii] = -1;
                // Make sure mutated_index is not completely unspecified
                // If it becomes completely unspecified, then we remove the haplotype
                bool completely_unspecified = true;
                // NOTE(review): this inner `ii` shadows the locus loop's `ii`
                // above; correct as written, but easy to misread — consider
                // renaming one of them.
                for(uint ii=0; ii<mutated_index.size(); ++ii) {
                    if(mutated_index[ii] != -1) {
                        completely_unspecified =false;
                        break;
                    }
                }

                // Only add to graph if specified at at least one locus
                // If specified at at least one locus then subtract original and add new hap (with one allele missing)
                if(!completely_unspecified) {
                    hap_id_t mutated_hap_id = hap_to_id[mutated_index];

                    // Create recursive sample
                    packed_config_t recursive_samp = cur_samp;
                    // Subtract out one individual of cur_index
                    --recursive_samp[cur_hap_id];
                    // Add one one individual of mutated index
                    ++recursive_samp[mutated_hap_id];
                    // Add recursive sample to graph
                    ++mut_terms;
                    add_vertex(cur_samp, recursive_samp, coefficient, bfs_queue);
                }
                else {
                    // If completely unspecified, subtract original
                    packed_config_t recursive_samp = cur_samp;
                    // Subtract out one individual of cur hap 
                    --recursive_samp[cur_hap_id];
                    ++mut_terms;
                    add_vertex(cur_samp, recursive_samp, coefficient, bfs_queue);
                }
            } // End iterating over loci
        } // End iterating over indexes for mutation term
        ////////////////////////////////////
        // End mutation term
        ////////////////////////////////////

        ///////////////////////////////////
        // Handle recombination term
        ///////////////////////////////////
        //
        // Iterate through all indices of sample
        for(uint jj=0; jj<NUM_HAPS; ++jj) { 
            hap_id_t const& cur_hap_id = jj;
            if(cur_samp[cur_hap_id] < 1)
                continue;
            index_t cur_index = id_to_hap[cur_hap_id];
            double  cur_hap_quantity = static_cast<double>(cur_samp[cur_hap_id]);

            // Get the left and right endpoints of cur index 
            size_t left_endpoint=0;
            size_t right_endpoint=0;
            // get left endpoint (first defined locus)
            for(uint ii=0; ii<cur_index.size(); ++ii) {
                if(cur_index[ii] != -1) {
                    left_endpoint = ii;
                    break;
                }
            }
            // get right endpoint (last defined locus)
            // NOTE(review): ii is unsigned, so `ii>=0` is always true; the
            // loop relies on hitting a defined locus to break. Every hap in
            // the maps defines at least one locus (create_maps skips the
            // all -1 index), so the break fires — but if that invariant ever
            // broke, --ii would wrap and index out of bounds.
            for(uint ii=cur_index.size()-1; ii>=0; --ii) {
                if(cur_index[ii] != -1) {
                    right_endpoint = ii;
                    break;
                }
            }

            // debug
            assert(right_endpoint < cur_index.size());

            // Starting from immediately after the left endpoint, find the next defined locus
            // old_left_pointer will keep track of how many undefined loci we come across as we search from left to right of cur_index looking for defined loci
            uint old_left_pointer = left_endpoint;
            for(uint left_pointer = left_endpoint+1 ; left_pointer <= right_endpoint; ++left_pointer) {
                // Found defined locus
                if(cur_index[left_pointer]!=-1) {
                    // create left_hap with left half
                    // left_hap contains everything to the left of left_pointer (not including left_pointer)
                    index_t left_index = index_t(data.num_loci, -1);
                    for(uint kk=0; kk<left_pointer; ++kk) {
                        left_index[kk] = cur_index[kk];
                    }
 
                    // create right_hap with right half
                    // right_hap contains everything to the right of left_pointer (including left_pointer)
                    index_t right_index = index_t(data.num_loci, -1);
                    for(uint kk=left_pointer; kk<right_index.size(); ++kk) {
                        right_index[kk] = cur_index[kk];
                    }

                    // The coefficient should be multiplied by repeat_breakpoint, since instead of adding each breakpoint in separately, I'm adding it all at once, taking advantage of the fact that all breakpoints have the same recombination rate
                    double repeat_breakpoint = static_cast<double>(left_pointer - old_left_pointer);

                    // create recursive_sample
                    hap_id_t left_hap_id = hap_to_id[left_index];
                    hap_id_t right_hap_id = hap_to_id[right_index];

                    packed_config_t recursive_samp = cur_samp;
                    
                    ++recursive_samp[left_hap_id];
                    ++recursive_samp[right_hap_id];
                    --recursive_samp[cur_hap_id];
                    // add left_hap and right_hap, each with quantity 1
                    // the coefficient is ( rho * quantity_of_the_current index * (left_pointer - old_left_pointer) )
                    double coefficient = (data.rho * cur_hap_quantity * repeat_breakpoint) / Z;

                    // add recursive_sample with coefficient to graph 
                    add_vertex(cur_samp, recursive_samp, coefficient, bfs_queue);
                    // set old_left_pointer to left_pointer before iterating again
                    old_left_pointer = left_pointer;
                }
            }
            // Done searching from left to right of cur_index, looking for breakpoints

        } // End iterating through all indices from recombination term
        ///////////////////////////////////
        // End recombination term
        ///////////////////////////////////

    } // End BFS for exploring from seq_config sample configuration

    /////////
    // debug
    // every vertex counted as added for this root should be in the graph
    assert(graph.size() == num_samp_added_for_root);
    // reset num_samp_added_for_root
    num_samp_added_for_root = 0;
    // end debug
    /////////

    return 0;
}

// Adds the directed edge (source -> target, coefficient) to the graph.
// - If target's probability is already known (solved in a previous block, or
//   a base case), the contribution coefficient * P(target) is instead folded
//   into border_vert[source], a running sum used when generating the linear
//   system for the current block.
// - Otherwise target is enqueued for BFS exploration (if new) and the edge is
//   recorded; a repeated (source, target) edge merges its coefficient into
//   the existing edge rather than creating a multi-edge.
// Returns 1 if the edge was dropped (zero coefficient), 0 otherwise.
int Compute::add_vertex(packed_config_t& source, packed_config_t& target, double coefficient, packed_queue_t& bfs_queue) {
    // If coefficient == 0, then there's no point to add the vertex to the graph
    if(coefficient == 0) {
        ++coeff_zero;
        return 1;
    }

    // The source should always already be in the graph:
    // either we add it as we go through seq_config,
    // or it must've been added by a previous vertex as we explore the graph.
    assert(graph.find(source) != graph.end());

    // If probability is already associated with the target vertex being added,
    //   then it must have belonged to a previous *block* (or maybe it's a base case).
    // Record its value in the *source* vertex's border_vert hash map for use when
    // generating the linear system. This is a running sum of the known
    // coefficient*probability products for the source vertex.
    probs_type::iterator prob_iter = probs.find(target);
    if(prob_iter != probs.end()) {
        // operator[] value-initializes a missing entry to 0.0, so no separate
        // "insert if absent" step is needed.
        border_vert[source] += coefficient * prob_iter->second;

        // Note: Multiple return points in this function
        return 0;
    }

    // target's probability hasn't yet been solved for: it must be in the
    // current block, so schedule it for exploration if it is new.
    if(graph.find(target) == graph.end()) {
        // If target isn't in graph yet, add target to graph with default vec_out_edge_t
        graph[target] = vec_out_edge_t();
        bfs_queue.push(target);
        ++num_vert_added;
        // debug
        avg_samp_size_sum += target.distinct_hap();
        ++num_samp_added_for_root;
    }

    // Look up source's out-edge list once (instead of re-hashing on every
    // loop iteration and again for the push_back below). Safe: no further
    // graph mutation occurs while this reference is live.
    vec_out_edge_t& out_edges = graph[source];

    // Check to see if edge is a repeat (as in, the target vertex is a repeat,
    // regardless of the associated coefficient).
    // If it is a repeat, add to the existing coefficient on the edge to save an
    // edge and return from here. Multi-edges would still be correct; this only
    // saves a little memory at the cost of a linear scan in the common case.
    vec_out_edge_t::iterator prev_seen_edge_iter;
    for(prev_seen_edge_iter = out_edges.begin();
            prev_seen_edge_iter != out_edges.end(); ++prev_seen_edge_iter) {
        if(prev_seen_edge_iter->first == target) {
            ++prev_seen_vertex;
            ++num_edge_added;
            ++num_edge_added_for_root;

            // Add current coefficient to the existing coefficient on the previously-seen edge
            prev_seen_edge_iter->second += coefficient;

            // Note: Multiple return points in this function
            return 0;
        }
    }

    // The edge hasn't been seen before: append a new edge.
    out_edges.push_back(out_edge_t(target, coefficient));

    ++num_edge_added;
    ++num_edge_added_for_root;

    // Note: Multiple return points in this function
    return 0;
}

// Find the strongly connected components of graph using Tarjan's algorithm.
// Each component is appended (as a vertex list) to list_of_sccs.
int Compute::compute_sccs(vv_packed_config_t& list_of_sccs) {
    size_t scc_index = 0;           // next DFS index to hand out
    packed_stack_t scc_stack;       // vertices of the component being built
    packed_int_hash_t scc_in_stack; // membership flags for scc_stack
    packed_uint_hash_t scc_id;      // DFS index assigned to each visited vertex
    packed_uint_hash_t scc_lowlink; // lowest index reachable from each vertex

    // Start an exploration from every vertex that has not been indexed yet,
    // so that every component is found even in a disconnected graph.
    adj_list_t::iterator g_iter;
    for(g_iter = graph.begin(); g_iter != graph.end(); ++g_iter) {
        packed_config_t const &vertex = g_iter->first;
        if(scc_id.find(vertex) != scc_id.end()) {
            continue; // already visited by an earlier exploration
        }
        scc_explore(vertex, scc_index, scc_id, scc_lowlink, scc_stack, scc_in_stack, list_of_sccs);
    }
    return 0;
}

// Tarjan's algorithm for finding strongly connected components.
// Recursively explores cur_samp, assigning it a DFS index (scc_id) and a
// lowlink (the smallest DFS index reachable from its DFS subtree).
// When a vertex's lowlink equals its own index, it is the root of an scc:
// everything at or above it on scc_stack is popped off as one component
// and appended to list_of_sccs. Always returns 0.
int Compute::scc_explore(packed_config_t const& cur_samp, size_t& scc_index, packed_uint_hash_t& scc_id, packed_uint_hash_t& scc_lowlink, packed_stack_t &scc_stack, packed_int_hash_t &scc_in_stack, vv_packed_config_t& list_of_sccs) {
    // Assign the next DFS index; lowlink starts equal to the vertex's own index.
    scc_id[cur_samp] = scc_index;
    scc_lowlink[cur_samp] = scc_index;
    ++scc_index;

    scc_stack.push(cur_samp); // push samp_config on stack
    scc_in_stack[cur_samp] = 1; // efficiently keep track of which samp_configs are in stack

    // Iterate through cur_samp's neighbors
    for(vec_out_edge_t::iterator n_iter = graph[cur_samp].begin();
            n_iter != graph[cur_samp].end(); ++n_iter) {
        packed_config_t const &neighbor_samp = n_iter->first;
        // If neighbor hasn't been visited yet
        if(scc_id.find(neighbor_samp) == scc_id.end()) {
            // Tree edge: recurse, then propagate the child's lowlink upward.
            scc_explore(neighbor_samp, scc_index, scc_id, scc_lowlink, scc_stack, scc_in_stack, list_of_sccs);
            scc_lowlink[cur_samp] = min(scc_lowlink[cur_samp], scc_lowlink[neighbor_samp]);
        }
        else if(scc_in_stack[neighbor_samp] == 1) {
            // If neighbor is in stack, then it must be in the same scc.
            // Note: uses the neighbor's *index* (scc_id), not its lowlink,
            // as Tarjan's algorithm requires for back/cross edges.
            scc_lowlink[cur_samp] = min(scc_lowlink[cur_samp], scc_id[neighbor_samp]);
        }
        // If vertex was already listed off in another scc, then ignore it
    }
    // cur_samp is the root of an scc exactly when its lowlink never dropped
    // below its own DFS index.
    if(scc_lowlink[cur_samp] == scc_id[cur_samp]) {
        seq_config_t scc_list;

        // pop off everything above cur_samp and add to scc_list
        assert(scc_stack.size() > 0);
        while(cur_samp != scc_stack.top()) {
            scc_list.push_back(scc_stack.top());
            scc_in_stack[scc_stack.top()] = 0;
            scc_stack.pop();
        }
        // add cur_samp to scc_list
        scc_list.push_back(scc_stack.top());
        scc_in_stack[scc_stack.top()] = 0;
        scc_stack.pop();

        // The above can be optimized (time-wise) if I keep the index along with the samp_config in the stack so that I only have to check the index (scc_id)
        list_of_sccs.push_back(scc_list);
    }
    return 0;
}

// Topologically order the strongly connected components so that each scc is
// solved only after the sccs it depends on.
int Compute::topological_sort_scc(vv_packed_config_t& list_of_sccs, scc_rank_t& scc_rank) {
    // Phase 1: run a full recursive DFS over the graph to obtain post numbers.
    // (A post number is assigned to a vertex right before its recursive call
    // returns; a pre number would be assigned before exploring neighbors.)
    packed_uint_hash_t post_numbers;
    packed_int_hash_t visited;
    uint post_number = 0;
    adj_list_t::iterator vert_iter;
    for(vert_iter = graph.begin(); vert_iter != graph.end(); ++vert_iter) {
        packed_config_t const &vertex = vert_iter->first;
        if(visited.find(vertex) != visited.end()) {
            continue; // already explored from an earlier start vertex
        }
        dfs_explore(vertex, post_number, post_numbers, visited);
    }

    // Phase 2: tag each scc with the largest post number among its vertices.
    // The scc graph is a DAG, so sorting by this number (ascending) puts the
    // sccs that depend on the fewest things first — the order to solve them in.
    // An scc's ID is its index in list_of_sccs; scc_rank maps rank -> scc ID.
    for(uint scc_idx = 0; scc_idx < list_of_sccs.size(); ++scc_idx) {
        uint best_post = 0;
        packed_vector_t::iterator member;
        for(member = list_of_sccs[scc_idx].begin();
                member != list_of_sccs[scc_idx].end(); ++member) {
            if(post_numbers[*member] > best_post) {
                best_post = post_numbers[*member];
            }
        }
        scc_rank.push_back( scc_post_scc_t(best_post, scc_idx) );
    }

    // scc_rank now contains: [post number <-> (scc index in list_of_sccs)],
    // ordered by post number.
    sort(scc_rank.begin(), scc_rank.end());

    return 0;
}

// Recursive depth-first search: marks packed_config as visited, fully
// explores its unvisited out-neighbors, and only then assigns it the next
// post number (i.e. post numbers are handed out on the way back up).
int Compute::dfs_explore(packed_config_t const& packed_config, uint& post_number, packed_uint_hash_t& post_numbers, packed_int_hash_t& visited) {
    visited[packed_config] = 1;

    // Recurse into every out-neighbor that has not been visited yet.
    vec_out_edge_t::iterator edge_iter;
    for(edge_iter = graph[packed_config].begin();
            edge_iter != graph[packed_config].end(); ++edge_iter) {
        packed_config_t const& next_vertex = edge_iter->first;
        if(visited.find(next_vertex) != visited.end()) {
            continue; // neighbor already handled
        }
        dfs_explore(next_vertex, post_number, post_numbers, visited);
    }

    // All descendants are done: record this vertex's post number.
    post_numbers[packed_config] = post_number;
    ++post_number;
    return 0;
}

// Converts an scc into a linear system by filling in b_vector and A_matrix.
// Row ii of the system corresponds to scc_to_solve[ii]:
//   A has 1.0 on the diagonal and -coefficient for each in-scc neighbor;
//   b accumulates border_vert contributions (from previously-solved blocks)
//   plus coefficient * P(neighbor) for each already-solved neighbor.
// packed_index_system (an output) maps each samp_config to its row index.
int Compute::convert_scc_to_system(packed_vector_t& scc_to_solve, vector_t& b_vector, matrix_t& A_matrix, packed_uint_hash_t &packed_index_system) {
    // Use of packed_index_system:
    // We need to be able to quickly find out a samp_config's index in scc_to_solve.
    // This index determines the samp_config's row number in A_matrix and position in b_vector.
    for(uint ii=0; ii<scc_to_solve.size(); ++ii) {
        packed_index_system[scc_to_solve[ii]] = ii;
    }

    // Fill in A_matrix and b_vector
    for(uint ii=0; ii<scc_to_solve.size(); ++ii) {
        packed_config_t &cur_samp = scc_to_solve[ii];

        // Set the diagonal of the A_matrix to 1.0
        A_matrix(ii, ii) = 1.0;

        // If the vertex is in border_vert, it has a neighbor that was already
        // solved for in a previous block: start b at that accumulated
        // coefficient*probability sum (built up in add_vertex()); otherwise 0.
        if(border_vert.find(cur_samp) != border_vert.end()) {
            b_vector(ii) = border_vert[cur_samp];
        }
        else {
            b_vector(ii) = 0.0;
        }

        // Iterate over all outgoing neighbors of the current vertex.
        // Look up the edge list once instead of re-hashing cur_samp each iteration.
        vec_out_edge_t &out_edges = graph[cur_samp];
        for(uint jj=0; jj < out_edges.size(); ++jj) {
            packed_config_t &neighbor = out_edges[jj].first;
            double neighbor_coeff = out_edges[jj].second;

            // debug
            assert(neighbor_coeff != 0.0);
            assert(!isnan(neighbor_coeff));

            // Single lookup: is the neighbor inside the current scc?
            packed_uint_hash_t::iterator idx_iter = packed_index_system.find(neighbor);
            if(idx_iter != packed_index_system.end()) {
                // Neighbor is in the current scc: add the negative of the
                // coefficient to the appropriate entry of A.
                A_matrix(ii, idx_iter->second) += -1.0 * neighbor_coeff;
            }
            else {
                // Neighbor is outside the current scc, so its probability
                // must already have been solved for; fold it into b.
                probs_type::iterator prob_iter = probs.find(neighbor);
                assert(prob_iter != probs.end());

                b_vector(ii) += prob_iter->second * neighbor_coeff;
            }
        }
    }
    return 0;
}

// Translate a samp_config (haplotype -> count map) into its packed
// representation, indexed by compact haplotype ID.
int Compute::convert_to_packed(samp_config_t const& samp_config, packed_config_t& packed_config) const {
    packed_config.clear();
    // Walk every (haplotype, count) entry of the input sample configuration.
    samp_config_t::const_iterator entry;
    for(entry = samp_config.begin(); entry != samp_config.end(); ++entry) {
        // Map the haplotype to its compact ID (throws if unknown) and store
        // its multiplicity in the packed array.
        hap_id_t hap_id = hap_to_id.at(entry->first);
        packed_config[hap_id] = static_cast<uchar>(entry->second);
    }

    return 0;
}

// Translate a packed_config back into a samp_config, keeping only the
// haplotypes present with a positive count.
int Compute::convert_to_samp(packed_config_t const& packed_config, samp_config_t& samp_config) const {
    samp_config.clear();
    // Scan every haplotype slot of the packed representation.
    for(uint hap_id = 0; hap_id < NUM_HAPS; ++hap_id) {
        if(packed_config[hap_id] > 0) {
            // Recover the full haplotype index for this slot and copy the count.
            samp_config[id_to_hap.at(hap_id)] = packed_config[hap_id];
        }
    }
    return 0;
}

int Compute::display_probs() {
    for(probs_type::iterator p_iter = probs.begin(); p_iter != probs.end(); ++p_iter) {
        packed_config_t const &cur_samp = p_iter->first;
        display_samp_config(cur_samp);

        size_t old_precision = cout.precision();
        cout << setprecision(15);
        cout << "probability is: " << p_iter->second << endl;
        cout << setprecision(old_precision);
    }
    return 0;
}

// Solve the linear system A x = b, writing the solution into sol_vector.
// Currently just delegates to the iterative solver temp_lin_solve(); this
// indirection keeps a single place to swap in a different solver later.
int Compute::system_solve(vector_t& b_vector, matrix_t& A_matrix, vector_t& sol_vector) {
    temp_lin_solve(b_vector, A_matrix, sol_vector);
    return 0;
}

// Iterative linear system solver for A x = b (Gauss–Seidel-style sweeps):
// within each sweep, entries below the diagonal use values already updated
// this sweep (x), entries above use the previous sweep's values (x0).
// Starts from x0 = all-ones, stops when the infinity norm of the change
// between sweeps drops below the tolerance or after max_iterations sweeps
// (in which case the last iterate is returned unconverged).
// Assumes A has nonzero diagonal entries. The result goes into sol_vector.
int Compute::temp_lin_solve(vector_t& b_vector, matrix_t& A_matrix, vector_t& sol_vector) {
    const int n = A_matrix.size1();

    vector_t x0(n,1.0), x(n,0.0);
    const int max_iterations = 10000;
    const double tolerance = 1e-30;

    for(int kk=0; kk<max_iterations; ++kk) {
        for(int ii=0; ii<n; ++ii) {
            // Contribution from entries strictly below the diagonal,
            // using the values already updated in this sweep.
            double r = 0.0;
            for(int jj=0; jj<ii; ++jj) {
                r -= A_matrix(ii,jj) * x(jj);
            }
            // Contribution from entries strictly above the diagonal,
            // using the previous sweep's values.
            double s = 0.0;
            for(int jj=ii+1; jj<n; ++jj) {
                s -= A_matrix(ii,jj) * x0(jj);
            }
            // Solve row ii for x(ii).
            x(ii) = (r + s + b_vector(ii)) / A_matrix(ii,ii);
        }
        // Convergence check: infinity norm of the change between sweeps.
        double norm = 0.0;
        for(int jj=0; jj<n; ++jj) {
            norm = max(norm, abs(x(jj) - x0(jj)));
        }
        if(norm < tolerance) {
            break;
        }
        x0 = x;
    }
    sol_vector = x;

    return 0;
}
