//! A confusion matrix is used in data-mining as a summary of the performance
//! of a classification algorithm.
//!
//! This crate allows you to create an empty confusion matrix, and then 
//! populate it incrementally with the (actual, predicted) results from a
//! data-mining experiment. At any point, various statistical measures 
//! and results may be computed.
//!

use std::collections::HashMap;
use std::fmt;
use std::ops;

/// A confusion matrix is used to record pairs of (actual class, predicted class)
/// as typically produced by a classification algorithm.
/// 
/// It is designed to be called incrementally, as results are obtained 
/// from the classifier model. Class labels currently must be strings.
///
/// At any point, statistics may be obtained by calling the relevant methods.
///
/// A two-class example is:
/// ```text 
///     Predicted       Predicted     | 
///     Positive        Negative      | Actual
///     ------------------------------+------------
///         a               b         | Positive
///         c               d         | Negative
/// ```
///
/// Here:
///
/// * a is the number of true positives (those labelled positive and classified
///   positive)
/// * b is the number of false negatives (those labelled positive but classified
///   negative)
/// * c is the number of false positives (those labelled negative but classified 
///   positive)
/// * d is the number of true negatives (those labelled negative and classified 
///   negative)
///
/// From this table, we can calculate statistics like:
///
/// * true_positive_rate = a/(a+b)
/// * positive recall =    a/(a+c)
/// 
/// The implementation supports confusion matrices with more than two classes, 
/// and hence most statistics are calculated with reference to a named class. 
/// When more than two classes are in use, the statistics are calculated as if 
/// the named class were positive and all the other classes are grouped as if 
/// negative.
/// 
/// For example, in a three-class example:
/// 
/// ```text
/// Predicted       Predicted     Predicted     | 
///    Red            Blue          Green       | Actual
/// --------------------------------------------+------------
///     a               b             c         | Red
///     d               e             f         | Blue
///     g               h             i         | Green
/// ```
/// We can calculate:
/// 
/// * true_red_rate = a/(a+b+c)
/// * red recall =    a/(a+d+g)
/// 
/// # Example
///
/// The following example creates a simple two-class confusion matrix, prints 
/// a few statistics and displays the table.
/// 
/// ```ignore
/// use confusion_matrix;
/// 
/// fn main() {
///     let mut cm = confusion_matrix::new();
/// 
///     cm[("pos", "pos")] = 10;
///     cm[("pos", "neg")] = 5;
///     cm[("neg", "neg")] = 20;
///     cm[("neg", "pos")] = 3;
///     
///     println!("Precision: {}", cm.precision("pos"));
///     println!("Recall: {}", cm.recall("pos"));
///     println!("MCC: {}", cm.matthews_correlation("pos"));
///     println!("");
///     println!("{}", cm);
/// }
/// ``` 
/// Output:
/// 
/// ```text
/// Precision: 0.7692307692307693
/// Recall: 0.6666666666666666
/// MCC: 0.5524850114241865
/// 
/// Predicted |
/// neg pos   | Actual
/// ----------+-------
///  20   3   | neg
///   5  10   | pos
/// ```
/// 
#[derive(Clone,Debug,Default,Eq,PartialEq)]
pub struct ConfusionMatrix {
    // Maps actual class label -> (predicted class label -> instance count).
    // Absent keys are treated as a count of zero by the accessor methods.
    matrix: HashMap<String, HashMap<String, usize>>,
}

/// Creates a new empty instance of a confusion matrix.
///
/// # Example
/// ```
/// let mut cm = confusion_matrix::new();
/// ```
///
pub fn new() -> ConfusionMatrix {
    ConfusionMatrix {
        matrix: HashMap::new(),
    }
}

impl fmt::Display for ConfusionMatrix {
    /// Formats the matrix as an ASCII table: one column per (sorted) class
    /// label, one row of counts per actual label, a "Predicted" title row,
    /// and an "Actual" gutter column on the right.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ls = self.labels();
        let mut result = String::new();

        let mut title_line = String::from("Predicted ");
        let mut label_line = String::new();

        // Build the header row listing every class label.
        for l in &ls {
            label_line.push_str(&l);
            label_line.push(' ');
        }
        // Pad whichever header line is shorter so both have equal width;
        // the shared width also sizes the separator and the count rows.
        while label_line.len() < title_line.len() {
            label_line.push(' ');
        }
        while title_line.len() < label_line.len() {
            title_line.push(' ');
        }
        result.push_str(&title_line);
        result.push_str("|\n");
        result.push_str(&label_line);
        result.push_str("| Actual\n");

        // Horizontal separator between the header and the count rows.
        for _ in 0..title_line.len() {
            result.push('-');
        }
        result.push_str("+-------\n");

        // One row per actual label; each count is right-aligned to the
        // width of its column's label so columns line up with the header.
        for l in &ls {
            let mut count_line = String::new();

            for i in 0..ls.len() {
                count_line.push_str(&format!("{:>1$} ", self.count_for(&l, &ls[i]), ls[i].len()));
            }

            result.push_str(&format!("{:<2$}| {}\n", &count_line, &l, title_line.len()));
        }

        write!(f, "{}", result)
    }
}

impl ops::Index<(&str, &str)> for ConfusionMatrix {
    type Output = usize;

    /// Returns a reference to the count recorded for an
    /// (actual, prediction) pair, or to 0 if the pair has never been seen.
    ///
    /// * `actual` - the actual class of the instance, which we are hoping 
    ///              the classifier will predict.
    /// * `prediction` - the predicted class for the instance, as output from
    ///                  the classifier.
    /// 
    /// # Example 
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(2, cm[("positive", "positive")]);
    /// assert_eq!(0, cm[("positive", "not_known")]);
    /// ```
    ///
    fn index(&self, (actual, prediction): (&str, &str)) -> &usize {
        // Unknown pairs fall through to a shared static zero
        // (`&0` is promoted to a 'static reference).
        self.matrix
            .get(actual)
            .and_then(|predictions| predictions.get(prediction))
            .unwrap_or(&0)
    }
}

impl ops::IndexMut<(&str, &str)> for ConfusionMatrix {
    /// Provides a mutable reference to the count for an (actual, prediction) pair.
    ///
    /// * `actual` - the actual class of the instance, which we are hoping 
    ///              the classifier will predict.
    /// * `prediction` - the predicted class for the instance, as output from
    ///                  the classifier.
    /// 
    /// # Example 
    ///
    /// ```
    /// let mut cm = confusion_matrix::new();
    /// for _ in 0..2 { cm[("positive", "positive")] += 1; }
    /// cm[("positive", "negative")] = 5;
    /// cm[("negative", "positive")] = 1;
    /// for _ in 0..3 { cm[("negative", "negative")] += 1; }
    /// assert_eq!(2, cm[("positive", "positive")]);
    /// assert_eq!(5, cm[("positive", "negative")]);
    /// assert_eq!(0, cm[("positive", "not_known")]);
    /// ```
    ///
    fn index_mut(&mut self, (actual, prediction): (&str, &str)) -> &mut usize {
        // make sure there is a slot for (actual, prediction)
        if !self.matrix.contains_key(actual) {
            self.matrix.insert(String::from(actual), HashMap::new());
        }
        if let Some(predictions) = self.matrix.get_mut(actual) {
            if None == predictions.get(prediction) {
                predictions.insert(String::from(prediction), 0);
            }
        }
        // return a mutable reference to (actual, prediction) slot
        self.matrix.get_mut(actual)
            .expect("Confusion matrix must contain actual value")
            .get_mut(prediction)
            .expect("Confusion matrix must contain predicted value")
    }
}

impl ConfusionMatrix {
    /// Adds one result to the matrix.
    /// 
    /// * `actual` - the actual class of the instance, which we are hoping 
    ///              the classifier will predict.
    /// * `prediction` - the predicted class for the instance, as output from
    ///                  the classifier.
    ///
    /// # Example
    ///
    /// The following table can be made as:
    /// ```text 
    ///     Predicted       Predicted     | 
    ///     Positive        Negative      | Actual
    ///     ------------------------------+------------
    ///         2               5         | Positive
    ///         1               3         | Negative
    /// ```
    ///
    /// ```
    /// let mut cm = confusion_matrix::new();
    /// for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// ```
    ///
    pub fn add_for(&mut self, actual: &str, prediction: &str) {
        if !self.matrix.contains_key(actual) {
            self.matrix.insert(String::from(actual), HashMap::new());
        }
        if let Some(predictions) = self.matrix.get_mut(actual) {
            let mut next_count = 1;
            if let Some(count) = predictions.get(prediction) {
                next_count = *count + 1;
            }
            predictions.insert(String::from(prediction), next_count);
        }
    }

    /// Looks up how many times the (actual, prediction) pair has been
    /// recorded, returning 0 when either label has never been seen.
    ///
    /// * `actual` - the actual class of the instance, which we are hoping 
    ///              the classifier will predict.
    /// * `prediction` - the predicted class for the instance, as output from
    ///                  the classifier.
    /// 
    /// # Example 
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(2, cm.count_for("positive", "positive"));
    /// assert_eq!(0, cm.count_for("positive", "not_known"));
    /// ```
    ///
    pub fn count_for(&self, actual: &str, prediction: &str) -> usize {
        self.matrix
            .get(actual)
            .and_then(|predictions| predictions.get(prediction))
            .copied()
            .unwrap_or(0)
    }

    /// Returns the number of instances of the given class label which 
    /// are incorrectly classified (predicted as some other label).
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(5, cm.false_negative("positive"));
    /// assert_eq!(1, cm.false_negative("negative"));
    /// ```
    ///
    pub fn false_negative(&self, label: &str) -> usize {
        // Sum the row for `label`, skipping the diagonal (correct) cell.
        self.matrix.get(label).map_or(0, |predictions| {
            predictions
                .iter()
                .filter(|(predicted, _)| predicted.as_str() != label)
                .map(|(_, count)| *count)
                .sum()
        })
    }

    /// Returns the number of instances incorrectly classified as the given 
    /// class label (their actual label was something else).
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(1, cm.false_positive("positive"));
    /// assert_eq!(5, cm.false_positive("negative"));
    /// ```
    ///
    pub fn false_positive(&self, label: &str) -> usize {
        // Sum the `label` column over every row whose actual class differs.
        self.matrix
            .iter()
            .filter(|(actual, _)| actual.as_str() != label)
            .filter_map(|(_, predictions)| predictions.get(label))
            .sum()
    }

    /// Returns the false positive rate for the given class label: the 
    /// proportion of instances NOT of that label which are incorrectly 
    /// classified as it, i.e. fp / (fp + tn).
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert!((cm.false_rate("positive") - 1.0/4.0).abs() < 0.001); // false_rate("positive") = 1/(1+3)
    /// assert!((cm.false_rate("negative") - 5.0/7.0).abs() < 0.001); // false_rate("negative") = 5/(5+2)
    /// ```
    ///
    pub fn false_rate(&self, label: &str) -> f64 {
        let fp = self.false_positive(label);
        let tn = self.true_negative(label);

        // divide() yields 0.0 when fp + tn == 0 (no negative instances).
        divide(fp, fp + tn)
    }

    /// Returns the F-measure (F1 score) for a given class label, which is 
    /// the harmonic mean of the precision and recall for that label:
    /// 2 * p * r / (p + r). Returns 0.0 when both are zero.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    pub fn f_measure(&self, label: &str) -> f64 {
        // Compute precision and recall once each; the previous version
        // recomputed both when forming the numerator.
        let p = self.precision(label);
        let r = self.recall(label);

        if p + r == 0.0 {
            // Avoid 0/0 when the label was never predicted nor present.
            0.0
        } else {
            2.0 * p * r / (p + r)
        }
    }

    /// Returns the geometric mean of the true rates for each class label.
    ///
    pub fn geometric_mean(&self) -> f64 {
        if self.matrix.is_empty() {
            0.0
        } else {
            let mut product = 1.0;

            for key in self.matrix.keys() {
                product *= self.true_rate(key);
            }

            product.powf(1.0 / (self.matrix.len() as f64))
        }
    }

    /// Returns Cohen's Kappa Statistic, which is a measure of the quality 
    /// of binary classification.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    pub fn kappa(&self, label: &str) -> f64 {
        // `ff` holds the false-negative count (`fn` is a Rust keyword).
        let tp = self.true_positive(label);
        let ff = self.false_negative(label);
        let fp = self.false_positive(label);
        let tn = self.true_negative(label);
        let total = tp + ff + fp + tn;
        // Observed agreement: fraction classified correctly.
        let total_accuracy = divide(tp + tn, tp + tn + fp + ff);
        // Agreement expected by chance, from the row/column marginals.
        // NOTE(review): the products here use usize arithmetic; for
        // extremely large instance counts they could overflow — confirm
        // acceptable for expected data sizes.
        let random_accuracy = divide((tn + fp) * (tn + ff) + (ff + tp) * (fp + tp), total * total);

        // Kappa is undefined when chance agreement is perfect; report 0.0.
        if random_accuracy == 1.0 {
            0.0
        } else {
            (total_accuracy - random_accuracy) / (1.0 - random_accuracy)
        }
    }

    /// Returns a sorted, de-duplicated vector of every class label that 
    /// appears in the matrix, whether as an actual or a predicted class.
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(vec!["negative", "positive"], cm.labels());
    /// ```
    ///
    pub fn labels(&self) -> Vec<String> {
        // Gather each actual label together with all of its predicted
        // labels, then sort and drop duplicates.
        let mut result: Vec<String> = self
            .matrix
            .iter()
            .flat_map(|(actual, predictions)| {
                std::iter::once(actual.clone()).chain(predictions.keys().cloned())
            })
            .collect();

        result.sort();
        result.dedup();
        result
    }

    /// Returns the Matthews Correlation Coefficient, which is a measure of 
    /// the quality of binary classification. The result lies in [-1.0, 1.0],
    /// or is 0.0 when the denominator would be zero.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    pub fn matthews_correlation(&self, label: &str) -> f64 {
        // All arithmetic is done in f64: the previous usize expression
        // `tp * tn - fp * ff` underflowed (panicking in debug builds,
        // silently wrapping in release) whenever the coefficient is
        // negative, and the four-way product could overflow usize for
        // large counts. `ff` holds the false-negative count.
        let tp = self.true_positive(label) as f64;
        let ff = self.false_negative(label) as f64;
        let fp = self.false_positive(label) as f64;
        let tn = self.true_negative(label) as f64;
        let divisor = ((tp + fp) * (tp + ff) * (tn + fp) * (tn + ff)).sqrt();

        if divisor == 0.0 {
            // Any empty marginal makes MCC undefined; report 0.0.
            0.0
        } else {
            (tp * tn - fp * ff) / divisor
        }
    }

    /// Returns the proportion of all instances which are correctly 
    /// labelled, i.e. the sum of the diagonal over the grand total.
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert!((cm.overall_accuracy() - (2.0+3.0)/11.0).abs() < 0.001); // overall_accuracy() = (2+3)/(2+5+1+3)
    /// ```
    ///
    pub fn overall_accuracy(&self) -> f64 {
        // The diagonal total: one true-positive count per actual class.
        let total_correct: usize = self
            .matrix
            .keys()
            .map(|label| self.true_positive(label))
            .sum();

        divide(total_correct, self.total())
    }

    /// Returns the proportion of instances classified as the given class 
    /// label which are correct, i.e. tp / (tp + fp).
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert!((cm.precision("positive") - 2.0/3.0).abs() < 0.001); // precision("positive") = 2/(2+1)
    /// ```
    ///
    pub fn precision(&self, label: &str) -> f64 {
        let tp = self.true_positive(label);

        // divide() yields 0.0 when the label was never predicted.
        divide(tp, tp + self.false_positive(label))
    }

    /// Returns the proportion of instances whose actual class is the given 
    /// label, out of the total number of instances.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert!((cm.prevalence("positive") - 7.0/11.0).abs() < 0.001); // prevalence = (2+5)/(2+5+1+3)
    /// ```
    ///
    pub fn prevalence(&self, label: &str) -> f64 {
        // Instances actually of `label` (classified correctly or not)
        // over all instances counted from this label's perspective.
        let positives = self.true_positive(label) + self.false_negative(label);
        let negatives = self.false_positive(label) + self.true_negative(label);

        divide(positives, positives + negatives)
    }

    /// Recall is another name for the true positive rate for that label.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    pub fn recall(&self, label: &str) -> f64 {
        // Simple alias: delegates directly to true_rate().
        self.true_rate(label)
    }

    /// Sensitivity is another name for the true positive rate (recall) for 
    /// that label.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    pub fn sensitivity(&self, label: &str) -> f64 {
        // Simple alias: delegates directly to true_rate().
        self.true_rate(label)
    }

    /// Returns 1 - false_rate(label)
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    pub fn specificity(&self, label: &str) -> f64 {
        // Specificity (the true negative rate) is the complement of the
        // false positive rate.
        1.0 - self.false_rate(label)
    }

    /// Returns the total number of instances recorded in the matrix, 
    /// i.e. the sum of every (actual, prediction) count.
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(11, cm.total());
    /// ```
    ///
    pub fn total(&self) -> usize {
        self.matrix
            .values()
            .flat_map(|predictions| predictions.values())
            .sum()
    }

    /// Returns the number of instances NOT of the given class label which 
    /// are correctly classified (as their own label).
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(3, cm.true_negative("positive"));
    /// assert_eq!(2, cm.true_negative("negative"));
    /// ```
    ///
    pub fn true_negative(&self, label: &str) -> usize {
        // Sum the diagonal cells of every row other than `label`'s.
        self.matrix
            .iter()
            .filter(|(actual, _)| actual.as_str() != label)
            .filter_map(|(actual, predictions)| predictions.get(actual.as_str()))
            .sum()
    }

    /// Returns the number of instances of the given class label which 
    /// are correctly classified.
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert_eq!(2, cm.true_positive("positive"));
    /// assert_eq!(3, cm.true_positive("negative"));
    /// ```
    ///
    pub fn true_positive(&self, label: &str) -> usize {
        // The diagonal cell for `label`, or 0 when it has never occurred.
        self.matrix
            .get(label)
            .and_then(|predictions| predictions.get(label))
            .copied()
            .unwrap_or(0)
    }

    /// Returns the proportion of instances of the given class label which 
    /// are correctly classified, i.e. tp / (tp + fn).
    ///
    /// * `label` - the class label to treat as "positive".
    ///
    /// # Example
    /// (using `cm` from [`Self::add_for()`])
    ///
    /// ```
    /// # let mut cm = confusion_matrix::new();
    /// # for _ in 0..2 { cm.add_for("positive", "positive"); }
    /// # for _ in 0..5 { cm.add_for("positive", "negative"); }
    /// # for _ in 0..1 { cm.add_for("negative", "positive"); }
    /// # for _ in 0..3 { cm.add_for("negative", "negative"); }
    /// assert!((cm.true_rate("positive") - 2.0/7.0).abs() < 0.001); // true_rate("positive") = 2/(2+5)
    /// assert!((cm.true_rate("negative") - 3.0/4.0).abs() < 0.001); // true_rate("negative") = 3/(1+3)
    /// ```
    ///
    pub fn true_rate(&self, label: &str) -> f64 {
        let tp = self.true_positive(label);

        // divide() yields 0.0 when the label has no instances at all.
        divide(tp, tp + self.false_negative(label))
    }
}

/// Divides `x` by `y` as f64 values, returning 0.0 for a zero denominator
/// (so rates over an empty class come out as 0.0 rather than NaN).
fn divide(x: usize, y: usize) -> f64 {
    match y {
        0 => 0.0,
        _ => (x as f64) / (y as f64),
    }
}

#[cfg(test)]
mod tests {
    use super::new;

    // Asserts that two floats are equal to within an absolute tolerance of 0.001.
    fn test_approx_same(num1: f64, num2: f64) {
        assert!((num1 - num2).abs() < 0.001);
    }

    // An empty matrix reports zero for every count and 0.0 for every rate.
    #[test]
    fn test_empty_case() {
        let cm = new();
        assert_eq!(0, cm.total());
        assert_eq!(0, cm.true_positive("none"));
        assert_eq!(0, cm.false_negative("none"));
        assert_eq!(0, cm.false_positive("none"));
        assert_eq!(0, cm.true_negative("none"));
        test_approx_same(0.0, cm.true_rate("none"));
    }

    // Builds a two-class matrix via both add_for() and the Index/IndexMut
    // operators, then checks every counting and rate method from both the
    // "pos" and "neg" perspectives.
    #[test]
    fn test_two_classes() {
        let mut cm = new();
        for _ in 0..10 {
            cm.add_for("pos", "pos");
        }
        for _ in 0..5 {
            cm[("pos", "neg")] += 1;
        }
        cm[("neg", "neg")] = 20;
        cm[("neg", "pos")] = 5;

        assert_eq!(vec!["neg", "pos"], cm.labels());
        assert_eq!(10, cm[("pos", "pos")]);
        assert_eq!(5, cm[("pos", "neg")]);
        assert_eq!(20, cm[("neg", "neg")]);
        assert_eq!(5, cm[("neg", "pos")]);

        assert_eq!(40, cm.total());
        assert_eq!(10, cm.true_positive("pos"));
        assert_eq!(5, cm.false_negative("pos"));
        assert_eq!(5, cm.false_positive("pos"));
        assert_eq!(20, cm.true_negative("pos"));
        assert_eq!(20, cm.true_positive("neg"));
        assert_eq!(5, cm.false_negative("neg"));
        assert_eq!(5, cm.false_positive("neg"));
        assert_eq!(10, cm.true_negative("neg"));

        test_approx_same(0.6667, cm.true_rate("pos"));
        test_approx_same(0.8, cm.true_rate("neg"));
        test_approx_same(0.2, cm.false_rate("pos"));
        test_approx_same(0.3333, cm.false_rate("neg"));
        test_approx_same(0.6667, cm.precision("pos"));
        test_approx_same(0.8, cm.precision("neg"));
        test_approx_same(0.6667, cm.recall("pos"));
        test_approx_same(0.8, cm.recall("neg"));
        test_approx_same(0.6667, cm.sensitivity("pos"));
        test_approx_same(0.8, cm.sensitivity("neg"));
        test_approx_same(0.75, cm.overall_accuracy());
        test_approx_same(0.6667, cm.f_measure("pos"));
        test_approx_same(0.8, cm.f_measure("neg"));
        test_approx_same(0.7303, cm.geometric_mean());
    }

    // Example from:
    // https://www.datatechnotes.com/2019/02/accuracy-metrics-in-classification.html
    // Checks the derived statistics against an externally published worked example.
    #[test]
    fn test_two_classes_2() {
        let mut cm = new();
        cm[("pos", "pos")] = 5;
        cm[("pos", "neg")] = 1;
        cm[("neg", "neg")] = 3;
        cm[("neg", "pos")] = 2;

        assert_eq!(11, cm.total());
        assert_eq!(5, cm.true_positive("pos"));
        assert_eq!(1, cm.false_negative("pos"));
        assert_eq!(2, cm.false_positive("pos"));
        assert_eq!(3, cm.true_negative("pos"));

        test_approx_same(0.7142, cm.precision("pos"));
        test_approx_same(0.8333, cm.recall("pos"));
        test_approx_same(0.7272, cm.overall_accuracy());
        test_approx_same(0.7692, cm.f_measure("pos"));
        test_approx_same(0.8333, cm.sensitivity("pos"));
        test_approx_same(0.6, cm.specificity("pos"));
        test_approx_same(0.4407, cm.kappa("pos"));
        test_approx_same(0.5454, cm.prevalence("pos"));
    }

    // Examples from:
    // https://standardwisdom.com/softwarejournal/2011/12/matthews-correlation-coefficient-how-well-does-it-do/
    // Builds a two-class matrix with counts a=tp, b=fn, c=tn, d=fp (for the
    // "pos" label) and checks MCC (e), precision (f), recall (g),
    // F-measure (h) and kappa (i).
    fn two_class_case(
        a: usize,
        b: usize,
        c: usize,
        d: usize,
        e: f64,
        f: f64,
        g: f64,
        h: f64,
        i: f64,
        ) {
        let mut cm = new();
        cm[("pos", "pos")] = a;
        cm[("pos", "neg")] = b;
        cm[("neg", "neg")] = c;
        cm[("neg", "pos")] = d;

        test_approx_same(e, cm.matthews_correlation("pos"));
        test_approx_same(f, cm.precision("pos"));
        test_approx_same(g, cm.recall("pos"));
        test_approx_same(h, cm.f_measure("pos"));
        test_approx_same(i, cm.kappa("pos"));
    }

    // Three published MCC examples: a perfect classifier, then two
    // progressively weaker ones.
    #[test]
    fn test_two_classes_3() {
        two_class_case(100, 0, 900, 0, 1.0, 1.0, 1.0, 1.0, 1.0);
        two_class_case(65, 35, 825, 75, 0.490, 0.4643, 0.65, 0.542, 0.4811);
        two_class_case(50, 50, 700, 200, 0.192, 0.2, 0.5, 0.286, 0.1666);
    }

    // A three-class matrix: checks that per-label counts group the other
    // two classes together as "negative".
    #[test]
    fn test_three_classes() {
        let mut cm = new();
        for _ in 0..10 {
            cm.add_for("red", "red");
        }
        for _ in 0..7 {
            cm.add_for("red", "blue");
        }
        for _ in 0..5 {
            cm.add_for("red", "green");
        }
        for _ in 0..20 {
            cm.add_for("blue", "red");
        }
        for _ in 0..5 {
            cm.add_for("blue", "blue");
        }
        for _ in 0..15 {
            cm.add_for("blue", "green");
        }
        for _ in 0..30 {
            cm.add_for("green", "red");
        }
        for _ in 0..12 {
            cm.add_for("green", "blue");
        }
        for _ in 0..8 {
            cm.add_for("green", "green");
        }

        assert_eq!(vec!["blue", "green", "red"], cm.labels());
        assert_eq!(112, cm.total());
        assert_eq!(10, cm.true_positive("red"));
        assert_eq!(12, cm.false_negative("red"));
        assert_eq!(50, cm.false_positive("red"));
        assert_eq!(13, cm.true_negative("red"));
        assert_eq!(5, cm.true_positive("blue"));
        assert_eq!(35, cm.false_negative("blue"));
        assert_eq!(19, cm.false_positive("blue"));
        assert_eq!(18, cm.true_negative("blue"));
        assert_eq!(8, cm.true_positive("green"));
        assert_eq!(42, cm.false_negative("green"));
        assert_eq!(20, cm.false_positive("green"));
        assert_eq!(15, cm.true_negative("green"));
    }

    // Clone produces an independent, equal copy; mutating the clone must
    // not affect the original, and must break equality.
    #[test]
    fn check_traits_clone_eq() {
        let mut cm = new();
        cm[("pos", "pos")] = 3;
        cm[("pos", "neg")] = 2;
        cm[("neg", "neg")] = 4;
        cm[("neg", "pos")] = 5;
        let mut cm_clone = cm.clone();

        assert_eq!(cm.total(), cm_clone.total());
        assert_eq!(14, cm.total());
        assert_eq!(14, cm_clone.total());
        assert_eq!(cm, cm_clone);

        cm_clone.add_for("pos", "pos");
        assert_eq!(14, cm.total());
        assert_eq!(15, cm_clone.total());
        assert!(cm != cm_clone);
    }

    // The derived Default must produce the same empty state as new().
    #[test]
    fn check_default() {
        let cm = super::ConfusionMatrix::default();
        assert_eq!(0, cm.total());
    }
}

