#![feature(assert_matches)]
#![feature(sort_floats)]

#[cfg(test)]
extern crate quickcheck;
#[cfg(test)]
#[macro_use(quickcheck)]
extern crate quickcheck_macros;

pub mod mec;
use mec::*;

pub mod imec;
use imec::*;

pub mod markov;
#[cfg(test)]
use markov::*;

#[macro_use]
extern crate slog;

// #[cfg(test)]
// #[macro_use]
// extern crate matches;

#[cfg(test)]
mod tests {
    use std::{assert_matches::assert_matches, collections::HashMap};

    use crate::common::{NextTokenDistributionProvider, compute_marginals};

    use super::*;

    use float_cmp::ApproxEq;
    use rand::SeedableRng;
    use rand_chacha::ChaCha8Rng;

    use itertools::Itertools;

    #[test]
    fn regression_test_1() {
        // Regression case: a 3-point distribution coupled with a uniform
        // 8-point distribution must marginalise back to both inputs.
        let next_token: DiscreteDistribution<()> = DiscreteDistribution {
            d: vec![0.3, 0.3, 0.4],
            labels: None,
        };
        let uniform_eight: DiscreteDistribution<()> = DiscreteDistribution {
            d: vec![0.125; 8],
            labels: None,
        };

        let (joint, swap_p_q) =
            min_entropy_joint_distribution_sparse(next_token.d.clone(), uniform_eight.d.clone());
        let (rowsums, colsums) = compute_marginals(&joint);

        println!("{}, {:?}, {:?}", swap_p_q, rowsums, colsums);

        // Rows marginalise to the larger (uniform) distribution, columns to
        // the smaller one.
        compare_float_vectors_approx(rowsums, uniform_eight.d);
        compare_float_vectors_approx(colsums, next_token.d);
    }

    #[test]
    fn regression_test_2() {
        // Regression case: real (unnormalised) top-10 token probabilities
        // coupled with a uniform 11-point distribution.
        let d_in = vec![
            0.12357846647500992,
            0.12231964617967606,
            0.09524810314178467,
            0.040485963225364685,
            0.028101634234189987,
            0.025149814784526825,
            0.02421366609632969,
            0.022305995225906372,
            0.02039368450641632,
            0.01820506528019905,
        ];
        let d_in_sum = d_in.iter().sum::<f64>();

        // Normalise the raw weights into a proper distribution.
        // (No need to clone `d_in` first; iterating borrows it.)
        let p_nx: DiscreteDistribution<()> = DiscreteDistribution {
            d: d_in.iter().map(|x| x / d_in_sum).collect::<Vec<_>>(),
            labels: None,
            // Some(vec![" It", "\n", " The", " And", " You", " We", " I", " So", " But", " That"]),
        };
        // Uniform distribution over 11 outcomes.
        let q_mu: DiscreteDistribution<()> = DiscreteDistribution {
            d: vec![0.09090909090909091; 11],
            labels: None,
        };
        let (result, swap_p_q) =
            min_entropy_joint_distribution_sparse(p_nx.d.clone(), q_mu.d.clone());
        let (rowsums, colsums) = compute_marginals(&result);

        println!("{}, {:?}, {:?}", swap_p_q, rowsums, colsums);

        println!("d_in sum = {}", d_in.iter().sum::<f64>());
        println!("rowsums sum = {}", rowsums.iter().sum::<f64>());
        // BUG FIX: this previously printed the ROW sums again, so the column
        // marginal total was never actually shown.
        println!("colsums sum = {}", colsums.iter().sum::<f64>());

        compare_float_vectors_approx(rowsums, q_mu.d);
        compare_float_vectors_approx(colsums, p_nx.d);
    }

    /// Debug aid: prints the row and column sums of a sparse coupling.
    /// Note this only prints — it makes no assertions about the values.
    fn check_marginals(coup: &SparseCoupling) {
        let (rowsums, colsums) = compute_marginals(coup);

        println!();

        // Render a list of sums as "a.bc, d.ef, …" with two decimals.
        let render = |sums: Vec<f64>| {
            sums.into_iter()
                .map(|x| format!("{:.2}", x))
                .collect::<Vec<_>>()
                .join(", ")
        };

        println!("rows sum to: [{}]", render(rowsums));
        println!("cols sum to: [{}]", render(colsums));
    }

    /// Asserts element-wise approximate equality of two f64 vectors.
    ///
    /// Also asserts the lengths match first: `zip` alone stops at the shorter
    /// vector, so a wrong-length marginal would previously have passed
    /// silently as long as its prefix matched.
    fn compare_float_vectors_approx(xs: Vec<f64>, bs: Vec<f64>) {
        assert_eq!(
            xs.len(),
            bs.len(),
            "vector length mismatch: {} vs. {}",
            xs.len(),
            bs.len()
        );
        xs.into_iter().zip(bs).for_each(|(a, b)| {
            assert!(a.approx_eq(b, (1e-12, 2)), "{a} vs. {b} (approx)");
        });
    }

    /// Rescales `xs` so its entries sum to 1.
    ///
    /// An all-zero input is returned unchanged to avoid a division by zero.
    /// Precondition (per the original contract): entries are nonnegative
    /// and NaN-free.
    fn normalized(xs: Vec<f64>) -> Vec<f64> {
        let total: f64 = xs.iter().sum();
        if total == 0.0 {
            xs
        } else {
            xs.into_iter().map(|x| x / total).collect()
        }
    }

    /// Newtype for a quickcheck-generated value in [0, 1).
    #[derive(Debug, Copy, Clone)]
    struct Probability(f64);

    impl quickcheck::Arbitrary for Probability {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            // `abs() % 1.0` maps any finite f64 into [0, 1); NaN and the
            // infinities yield NaN, so retry until a usable value appears.
            loop {
                let candidate = f64::arbitrary(g).abs() % 1.0;
                if !candidate.is_nan() {
                    return Probability(candidate);
                }
            }
        }
    }

    /// Wrapper enabling an `Arbitrary` impl that produces a normalised,
    /// ascending-sorted, label-free `DiscreteDistribution`.
    #[derive(Debug, Clone)]
    struct XDiscreteDistribution(DiscreteDistribution<()>);

    impl quickcheck::Arbitrary for XDiscreteDistribution {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            // Up to 255 entries; each a random probability, then normalised.
            let len = u8::arbitrary(g);
            let raw: Vec<f64> = (0..len).map(|_| Probability::arbitrary(g).0).collect();
            let mut weights = normalized(raw);
            weights.sort_floats();
            XDiscreteDistribution(DiscreteDistribution {
                d: weights,
                labels: None,
            })
        }
    }

    /// Worked greatest-lower-bound example with a known expected output
    /// (taken from the reference paper).
    #[test]
    fn glb_example_from_paper() {
        let lhs = vec![0.4, 0.3, 0.15, 0.08, 0.04, 0.03];
        let rhs = vec![0.44, 0.18, 0.18, 0.15, 0.03, 0.02];
        let expected = vec![0.4, 0.22, 0.18, 0.13, 0.04, 0.03];

        compare_float_vectors_approx(greatest_lower_bound(lhs, rhs), expected);
    }

    /// Minimum-entropy coupling example from the reference paper: couples two
    /// fixed 6-point distributions and compares the resulting sparse joint
    /// distribution against the paper's expected support.
    ///
    /// NOTE(review): the comparison loop at the bottom only PRINTS entries
    /// that differ by more than 1e-16 — it never asserts, so this test cannot
    /// fail on a wrong coupling (only `assert!(!swap_p_q)` can fail). Confirm
    /// whether that is intentional or the prints should become assertions.
    #[test]
    fn mec_example_from_paper() {
        let p = vec![0.4, 0.3, 0.15, 0.08, 0.04, 0.03];
        let q = vec![0.44, 0.18, 0.18, 0.15, 0.03, 0.02];
        // Square comparison grid side length: the larger of the two supports.
        let n = std::cmp::max(p.len(), q.len());

        let (mejd, swap_p_q) = min_entropy_joint_distribution_sparse(p.clone(), q.clone());

        // The paper's expected indices below assume no argument swap occurred.
        assert!(!swap_p_q);

        check_marginals(&mejd);

        // Diagnostic entropies: the coupling's, the paper's expected
        // marginal weights', and the greatest-lower-bound's.
        println!("H(M) = {}", mejd.entropy());
        println!(
            "hmark = {}",
            vec![
                0.4, 0.04, 0.18, 0.03, 0.05, 0.15, 0.08, 0.02, 0.02, 0.01, 0.02
            ]
            .entropy()
        );
        println!("H(glb(p,q)) = {}", greatest_lower_bound(p, q).entropy());
        println!("{:?}", mejd.clone().into_iter().rev().collect::<Vec<_>>());

        // Expected sparse joint distribution: (probability, (row, col)).
        let expected_m = [
            (0.4, (0, 0)),
            (0.04, (1, 0)),
            (0.18, (1, 1)),
            (0.03, (1, 2)),
            (0.05, (1, 3)),
            (0.15, (2, 2)),
            (0.08, (3, 3)),
            (0.02, (4, 3)),
            (0.02, (4, 4)),
            (0.01, (5, 4)),
            (0.02, (5, 5)),
        ];

        // Re-key both sides as (row, col) -> probability for O(1) lookup.
        let expected_hm = HashMap::from(expected_m.map(|(x, y)| (y, x)));
        let obtained_hm: HashMap<(usize, usize), f64> =
            HashMap::from_iter(mejd.into_iter().map(|(x, y)| (y, x)));

        // Report (print-only, see NOTE above) any cell whose expected and
        // obtained probabilities differ; absent cells count as 0.
        for i in 0..n {
            for j in 0..n {
                let exp = expected_hm.get(&(i, j)).unwrap_or(&0.0);
                let obt = obtained_hm.get(&(i, j)).unwrap_or(&0.0);
                let diff = (exp - obt).abs();
                if diff > 1e-16 {
                    println!("{},{}: {:.2} ({:?})", i, j, diff, diff);
                }
            }
        }
    }

    /// Property: the greatest lower bound must not depend on argument order.
    #[quickcheck]
    fn glb_is_symmetric(p: XDiscreteDistribution, q: XDiscreteDistribution) {
        let xs = p.0.d;
        let ys = q.0.d;
        let forward = greatest_lower_bound(xs.clone(), ys.clone());
        let backward = greatest_lower_bound(ys, xs);
        compare_float_vectors_approx(forward, backward)
    }

    // #[async_std::test]
    // #[quickcheck_async::tokio]    
    /// Round-trip property: encoding a random input over a random alphabet
    /// and then decoding it must reproduce the input exactly.
    ///
    /// NOTE(review): stacking `#[tokio::test]` with `#[quickcheck]` on an
    /// async fn is unusual — plain `quickcheck` does not support async test
    /// functions, and the commented-out `quickcheck_async` attributes above
    /// suggest this was under experimentation. Confirm the property cases are
    /// actually being generated and run.
    #[tokio::test]
    #[quickcheck]
    async fn encode_decode_memoryless(
        alphabet_data: Vec<(usize, u16)>, // (raw weight, symbol) pairs
        input_space_size: u8,             // size of the input alphabet
        input: Vec<usize>,                // raw input symbols (reduced below)
        random_seed: u64,                 // seed for the deterministic RNG
    ) {
        // Need at least two input symbols for a meaningful code.
        if input_space_size < 2 {
            return;
        }

        let mut rng = ChaCha8Rng::seed_from_u64(random_seed);
        // Deduplicate output symbols; duplicate symbols would make the
        // alphabet ill-formed.
        let alphabet_data = alphabet_data
            .into_iter()
            .unique_by(|x| x.1)
            .collect::<Vec<_>>();

        // Need at least two distinct output symbols as well.
        if alphabet_data.len() < 2 {
            return;
        }

        // Reduce arbitrary usize values into the input alphabet's range.
        let input: Vec<usize> = input
            .into_iter()
            .map(|x| x.rem_euclid(input_space_size as usize))
            .collect();

        if input.is_empty() {
            return;
        }

        println!(
            "|In|={}, |Out|={}, |input|={}, {}",
            input_space_size,
            alphabet_data.len(),
            input.len(),
            random_seed
        );

        // Split (weight, symbol) pairs and normalise the weights into a
        // probability distribution over the alphabet.
        let alphabet: Vec<_> = alphabet_data.iter().map(|x| x.1).collect::<Vec<_>>();
        let probs_raw = alphabet_data.into_iter().map(|x| x.0).collect::<Vec<_>>();
        let prob_sum: f64 = probs_raw.iter().map(|x| *x as f64).sum();
        let probs = probs_raw
            .into_iter()
            .map(|x| (x as f64) / prob_sum)
            .collect::<Vec<f64>>();

        let input_space_labels: Vec<usize> = (0..input_space_size as usize).collect();
        let mut provider: DiscreteDistribution<_> = DiscreteDistribution {
            d: probs,
            labels: Some(alphabet.clone()),
        };

        // TODO print this data only in case of failure -> rerun with seed
        // println!("input = {:?} (lab={:?})", input, input_space_labels);
        // println!("alphabet = {:?}", alphabet);
        // println!("token distro = {:?}", provider);

        // Encode, then decode with the same provider; any error in either
        // direction fails the property.
        let encoded = encode::encode(
            &mut rng,
            &input[..],
            &input_space_labels[..],
            &mut provider,
            (),
            None,
            None,
        )
        .await;
        match encoded {
            Ok(encoded) => {
                let decoded = decode::decode(
                    &mut encoded.into_iter(),
                    &input_space_labels[..],
                    &mut provider,
                    input.len(),
                    (),
                    None,
                    None,
                )
                .await;

                match decoded {
                    Ok(decoded) => {
                        // The round trip must be lossless.
                        assert_eq!(input, decoded);
                    }
                    Err(_err) => {
                        // Decoding must not fail on a successfully encoded input.
                        assert!(false);
                    }
                }
            }
            Err(_err) => {
                // Encoding must not fail for any generated configuration.
                assert!(false);
            }
        }
    }

    /// Property: the sparse minimum-entropy joint distribution must
    /// marginalise back to its two (descending-sorted) input distributions,
    /// and every support index must lie within the inputs' bounds.
    #[quickcheck]
    fn mejd_marginalises_correctly(p_raw: Vec<usize>, q_raw: Vec<usize>) {
        let p_sum: f64 = p_raw.iter().map(|x| *x as f64).sum::<f64>();
        let q_sum: f64 = q_raw.iter().map(|x| *x as f64).sum::<f64>();
        // All-zero inputs cannot be normalised into distributions.
        if p_sum == 0.0 || q_sum == 0.0 {
            return;
        }
        let mut p: DiscreteDistribution<()> =
            DiscreteDistribution::from_iterator(p_raw.iter().map(|x| (*x as f64) / p_sum));
        // BUG FIX: `q` was previously built from `p_raw`/`p_sum` (copy-paste),
        // so the property only ever tested two identical distributions.
        let mut q: DiscreteDistribution<()> =
            DiscreteDistribution::from_iterator(q_raw.iter().map(|x| (*x as f64) / q_sum));
        order_descending(&mut p);
        order_descending(&mut q);
        let (result, swap_p_q) = min_entropy_joint_distribution_sparse(p.d.clone(), q.d.clone());
        // Every (row, col) index in the support must stay within the larger
        // of the two input supports.
        assert_eq!(
            result
                .iter()
                .map(|(_, (i, j))| std::cmp::max(i, j))
                .max()
                .copied()
                .map(|x| x < std::cmp::max(p_raw.len(), q_raw.len())),
            Some(true)
        );

        let (rowsums, colsums) = compute_marginals(&result);

        // `swap_p_q` reports whether the implementation swapped its
        // arguments, which flips which input each marginal corresponds to.
        compare_float_vectors_approx(rowsums, if swap_p_q { p.d.clone() } else { q.d.clone() });
        compare_float_vectors_approx(colsums, if swap_p_q { q.d } else { p.d });
    }

    /// Smoke test for the internal Markov chain: tokenises a fixed prompt,
    /// builds a chain of order `up_to`, resets it to a known state, and
    /// checks that a next-token distribution can be obtained.
    #[tokio::test]
    async fn markov_chain_internal_test() -> Result<(), anyhow::Error> {
        // https://doc.rust-lang.org/book/ch04-03-slices.html
        let inputs = ["Here's a small programming problem: write a function that takes a string of words separated by spaces and returns the first word it finds in that string. If the function doesn't find a space in the string, the whole string must be one word, so the entire string should be returned.".to_string()];

        let token_iterator = TokenIterator::from_string(&inputs[0])?;

        // Sanity check: the tokenizer produces at least one token.
        let vec = token_iterator.collect::<Vec<_>>();
        // token_iterator.take(20).collect::<Vec<_>>();
        // println!("{:#?}", vec);
        assert!(!vec.is_empty());

        // Fresh iterator — the previous one was consumed by `collect`.
        let token_iterator = TokenIterator::from_string(&inputs[0])?;

        // Chain order (context length) used to build the Markov chain.
        let up_to = 3;
        let mut mc = MarkovChainInternal::from_token_iterator(token_iterator, up_to)?;

        // A freshly built chain starts with an empty state.
        assert_eq!(mc.state, vec![] as Vec<String>);

        // mc.set_state(vec!["Here's".to_string()]);
        mc.reset(vec!["Here's".to_string()]).await?;

        // println!("{:?}", mc);

        // Querying the distribution from the reset state must succeed.
        let distro = mc.get().await;

        assert_matches!(distro, Ok(..));

        println!("{:?}", distro);

        Ok(())
    }
}
