/**
 * Attempt to implement the sparse Minimum-Entropy Coupling method
 * from Cicalese, Gargano and Vaccaro, 2017.
 */
use ordered_float::NotNan;
use priority_queue::PriorityQueue;

/**
 * A discrete probability distribution, with non-increasing entries.
 */
#[derive(Debug, Clone)]
pub struct DiscreteDistribution<A> {
    pub d: Vec<f64>,
    // optional mapping from each dimension to the value it represents
    pub labels: Option<Vec<A>>,
}

impl<A> DiscreteDistribution<A> {
    /** Build an unlabelled distribution from a vector of masses. */
    pub fn new(d: Vec<f64>) -> Self {
        Self { d, labels: None }
    }

    /** Build an unlabelled distribution by collecting an iterator of masses. */
    pub fn from_iterator(d: impl IntoIterator<Item = f64>) -> Self {
        Self::new(d.into_iter().collect())
    }

    /** Build a distribution whose i-th mass is tagged with labels[i]. */
    pub fn from_iterator_and_labels(d: impl IntoIterator<Item = f64>, labels: Vec<A>) -> Self {
        Self {
            d: d.into_iter().collect(),
            labels: Some(labels),
        }
    }
}

/**
 * Sort the distribution in place so that masses are non-increasing, applying
 * the same permutation to the labels when they are present.
 * Panics if any mass is NaN.
 */
pub fn order_descending<A: Clone>(distr: &mut DiscreteDistribution<A>) {
    // Permutation that sorts the masses in descending order; the stable sort
    // keeps the original relative order of equal masses.
    let mut order: Vec<usize> = (0..distr.d.len()).collect();
    order.sort_by(|&a, &b| {
        let ka = NotNan::new(distr.d[a]).unwrap();
        let kb = NotNan::new(distr.d[b]).unwrap();
        kb.cmp(&ka)
    });

    // Permute the labels first (if any), then the masses themselves.
    if let Some(old_labels) = distr.labels.take() {
        distr.labels = Some(order.iter().map(|&i| old_labels[i].clone()).collect());
    }
    distr.d = order.iter().map(|&i| distr.d[i]).collect();
}

/**
 * @return the label of the largest mass; on ties the first (lowest-index)
 *         maximum is returned. Panics if `labels` is None, if `d` is empty,
 *         or if any mass is NaN.
 */
pub fn argmax<A: Clone>(distr: &DiscreteDistribution<A>) -> A {
    // `Iterator::max_by`/`max_by_key` return the *last* maximal element on
    // ties, which would contradict the documented "in doubt first" contract.
    // Break value ties explicitly toward the smaller index so that the first
    // maximum wins.
    let best_index = distr
        .d
        .iter()
        .enumerate()
        .max_by(|(i, a), (j, b)| {
            NotNan::new(**a)
                .unwrap()
                .cmp(&NotNan::new(**b).unwrap())
                .then_with(|| j.cmp(i))
        })
        .unwrap()
        .0;
    distr.labels.as_ref().unwrap()[best_index].clone()
}

/**
 * A joint distribution stored sparsely as (mass, (i, j)) entries, one per
 * non-zero cell of the coupling matrix (see
 * min_entropy_joint_distribution_sparse for how entries are produced).
 */
pub type SparseCoupling = Vec<(f64, (usize, usize))>;

/**
 * Compute the greatest lower bound of two distributions: entry i is the
 * increment of min(prefix-sum of p, prefix-sum of q) at position i.
 * Missing entries are read as 0.0; empty inputs yield a single 0.0 entry.
 *
 * @param p Must be ordered in non-increasing order.
 * @param q Must be ordered in non-increasing order.
 */
pub fn greatest_lower_bound(p: Vec<f64>, q: Vec<f64>) -> Vec<f64> {
    // Same comparison semantics as the original `if a < b` selection.
    let lesser = |a: f64, b: f64| if a < b { a } else { b };
    // At least one entry is always produced, even for two empty inputs.
    let n = std::cmp::max(std::cmp::max(p.len(), q.len()), 1);
    let mut z = Vec::with_capacity(n);
    let mut cum_p = 0.0;
    let mut cum_q = 0.0;
    let mut cum_z = 0.0;
    for i in 0..n {
        cum_p += p.get(i).copied().unwrap_or(0.0);
        cum_q += q.get(i).copied().unwrap_or(0.0);
        let zi = lesser(cum_p, cum_q) - cum_z;
        z.push(zi);
        cum_z += zi;
    }
    z
}

/**
 * Greedily construct a sparse coupling of the marginals `p` and `q`
 * (Cicalese, Gargano and Vaccaro, 2017), built around the greatest lower
 * bound of the two distributions.
 *
 * @param p marginal distribution; presumably in non-increasing order, as
 *          required by greatest_lower_bound — confirm at call sites
 * @param q marginal distribution; same ordering assumption as p
 * @return (coupling, swapped): the coupling as sparse (mass, (i, j))
 *         entries, plus a flag that is true when p and q were swapped
 *         internally (the two index coordinates then refer to the swapped
 *         marginals)
 */
pub fn min_entropy_joint_distribution_sparse(p: Vec<f64>, q: Vec<f64>) -> (SparseCoupling, bool) {
    let mut coupling = vec![];
    let n = std::cmp::max(p.len(), q.len());
    // Find the largest index at which p and q differ (out-of-range entries
    // are read as 0.0); remains None when the two marginals are identical.
    let mut i = None;
    'find_backmost_difference: for j in (0..n).rev() {
        if p.get(j).unwrap_or(&0.0) != q.get(j).unwrap_or(&0.0) {
            i = Some(j);
            break 'find_backmost_difference;
        }
    }

    // println!("backmost difference = {:?}", i);

    // Orient the pair so that p holds the larger entry at that backmost
    // difference; the flag is returned so the caller can map indices back.
    let swap_p_q = match i {
        Some(i) => p.get(i).unwrap_or(&0.0) < q.get(i).unwrap_or(&0.0),
        None => false,
    };
    let (p, q) = if swap_p_q {
        //  println!("swapping q,p");
        (q, p)
    } else {
        // println!("not swapping q,p");
        (p, q)
    };

    // println!("starting with p,q = {:?},{:?}", p, q);
    // println!();

    let z = greatest_lower_bound(p.clone(), q.clone());

    // println!("glb z = {:?}", z);

    // Pending masses still to be placed in some later column/row. The
    // Reverse wrapper turns the max-queues into min-queues, so the smallest
    // pending mass is popped first; qrowsum/qcolsum mirror the total mass
    // currently held in each queue.
    let mut qrow: PriorityQueue<usize, std::cmp::Reverse<NotNan<f64>>> = PriorityQueue::new();
    let mut qcol: PriorityQueue<usize, std::cmp::Reverse<NotNan<f64>>> = PriorityQueue::new();
    let mut qrowsum = 0.0;
    let mut qcolsum = 0.0;
    let mut zir: f64;

    // Walk the diagonal back to front, placing z[i] at cell (i, i) and
    // spilling any excess into the pending queues.
    for i in (0..n).rev() {
        // println!();
        // println!("iteration i={}", i);

        // Mass destined for the diagonal cell (i, i); may shrink below when
        // the pending column/row mass would overflow the marginal.
        let mut zid: f64 = z[i];

        if qcolsum + z[i] > *q.get(i).unwrap_or(&0.0) {
            // Column i cannot absorb everything: move the smallest pending
            // masses into the coupling and re-queue the remainder (if any).
            // println!(
            //     "col overflow {:.2} + {:.2} > {:.2}, running move_masses",
            //     &qcolsum, z[i], q[i]
            // );
            let indices;
            (zid, zir, indices, qcolsum) =
                move_probability_masses_up(z[i], *q.get(i).unwrap_or(&0.0), &mut qcol, qcolsum);
            for (m, l) in indices {
                coupling.push((m, (l, i)));
            }
            if zir > 0.0 {
                // println!("col-pushing zir ({:.2})", zir);
                qcol.push(i, std::cmp::Reverse(NotNan::new(zir).unwrap()));
                qcolsum += zir;
            }
        } else {
            // Column i has room: drain every pending column mass (smallest
            // first) into the coupling and reset the queue.
            // println!("col okay");
            for (l, m) in qcol.into_sorted_iter() {
                let m = m.0.into();
                qcolsum -= m;
                coupling.push((m, (l, i)))
            }
            qcol = PriorityQueue::new();
        }

        // Same procedure for the rows, now using zid (possibly reduced by
        // the column step above).
        if qrowsum + zid > *p.get(i).unwrap_or(&0.0) {
            // println!(
            //     "row overflow {:.2} + {:.2} > {:.2}, running move_masses",
            //     &qrowsum, zid, p[i]
            // );
            let indices;
            (zid, zir, indices, qrowsum) =
                move_probability_masses_up(zid, *p.get(i).unwrap_or(&0.0), &mut qrow, qrowsum);
            for (m, l) in indices {
                coupling.push((m, (i, l)));
            }
            if zir > 0.0 {
                // println!("row-pushing zir ({:.2})", zir);
                qrow.push(i, std::cmp::Reverse(NotNan::new(zir).unwrap()));
                qrowsum += zir;
            }
        } else {
            // println!("row okay");
            for (l, m) in qrow.into_sorted_iter() {
                let m = m.0.into();
                qrowsum -= m;
                coupling.push((m, (i, l)))
            }
            qrow = PriorityQueue::new();
        }

        // Whatever survived both overflow checks lands on the diagonal.
        // println!("pushing zid: {:.2}", zid);
        coupling.push((zid, (i, i)));

        // println!(
        //     "at iteration {}, {:?}, qrowsum,qcolsum={},{}",
        //     i,
        //     coupling.clone().into_iter().rev().collect::<Vec<_>>(),
        //     &qrowsum,
        //     &qcolsum
        // );
    }

    (coupling, swap_p_q)
}

/** Extract the raw f64 from a Reverse-wrapped NotNan priority. */
fn comparable_to_f64(r: std::cmp::Reverse<NotNan<f64>>) -> f64 {
    let std::cmp::Reverse(mass) = r;
    mass.into_inner()
}

/**
 * Pop the smallest queued masses while their running total stays strictly
 * below x, so that the remainder can be placed next to the mass z
 * ("Lemma 3" step of the construction).
 *
 * @param z > 0
 * @param x >= 0
 * @param queue pending (label, mass) entries, smallest mass first via the
 *        Reverse wrapper; popped entries are removed
 * @param qsum current total of the queue's masses; qsum + x >= z must hold
 * @return (zd, zr, moved, qsum') where zd = x minus the total mass moved,
 *         zr = z - zd, `moved` lists the popped (mass, label) pairs, and
 *         qsum' is the queue total after the pops
 */
fn move_probability_masses_up(
    z: f64,
    x: f64,
    queue: &mut PriorityQueue<usize, std::cmp::Reverse<NotNan<f64>>>,
    qsum: f64,
) -> (f64, f64, Vec<(f64, usize)>, f64) {
    let mut moved = vec![];
    let mut moved_total = 0.0;
    let mut remaining_qsum = qsum;

    // Keep taking the smallest pending mass as long as it still fits
    // strictly below x.
    while let Some((_, smallest)) = queue.peek() {
        if moved_total + comparable_to_f64(*smallest) >= x {
            break;
        }
        let (label, mass) = queue.pop().unwrap();
        let mass = comparable_to_f64(mass);
        remaining_qsum -= mass;
        moved.push((mass, label));
        moved_total += mass;
    }

    let zd = x - moved_total;
    let zr = z - zd;
    (zd, zr, moved, remaining_qsum)
}

/**
 * Shannon entropy (base 2) of a collection of probability masses.
 */
pub trait Entropic {
    fn entropy(&self) -> f64;
}

impl Entropic for Vec<f64> {
    /** Sum of -x·log2(x) over all entries; zero entries contribute 0. */
    fn entropy(&self) -> f64 {
        let mut h = 0.0;
        for &mass in self {
            if mass != 0.0 {
                h -= mass * mass.log2();
            }
        }
        h
    }
}

impl Entropic for SparseCoupling {
    /** Entropy of the coupling: sum of -m·log2(m) over the stored masses. */
    fn entropy(&self) -> f64 {
        let mut h = 0.0;
        for (mass, _cell) in self {
            if *mass != 0.0 {
                h -= *mass * mass.log2();
            }
        }
        h
    }
}
