use crate::{Tokenize, TokenID};
use pyo3::prelude::*;
use hashbrown::HashMap;

/// Byte-level BPE tokenizer exposed to Python via PyO3.
///
/// Training learns a sequence of pair merges; encoding replays those merges
/// on the raw bytes of the input, and decoding expands token ids back into
/// bytes via `vocab_table`.
#[pyclass]
pub struct BpeTokenizer {
    /// Maps a token pair to the new token id it was merged into during
    /// training. Merge token ids start above `u8::MAX` (see `merge_token`),
    /// so they never collide with raw byte tokens.
    merge_record: HashMap<(TokenID, TokenID), TokenID>,
    /// Maps every token id (the 256 raw bytes plus one entry per merge) to
    /// the byte sequence it decodes to.
    vocab_table: HashMap<TokenID, Vec<u8>>,
}

#[pymethods]
impl BpeTokenizer {
    /// Train a new tokenizer on `text`, performing up to `merge_size` merges.
    ///
    /// Exposed to Python as the class constructor.
    #[new]
    pub fn train(text: &str, merge_size: usize) -> BpeTokenizer {
        let merge_record = BpeTokenizer::merge_token(text, merge_size);

        // Invert the merge map (new token -> constituent pair) and order it
        // by token id, so the vocab table can be built children-first.
        let mut pairs_by_token: Vec<(TokenID, (TokenID, TokenID))> = merge_record
            .iter()
            .map(|(&pair, &token)| (token, pair))
            .collect();
        pairs_by_token.sort_by_key(|&(token, _)| token);

        let vocab_table = BpeTokenizer::build_vocab_table(&pairs_by_token);

        BpeTokenizer { merge_record, vocab_table }
    }

    /// Encode `text` into a sequence of BPE token ids.
    pub fn encode(&self, text: &str) -> Vec<TokenID> {
        Tokenize::encode(self, text)
    }

    /// Decode a sequence of token ids back into a string.
    pub fn decode(&self, token_ids: Vec<TokenID>) -> String {
        Tokenize::decode(self, &token_ids)
    }
}

impl Tokenize for BpeTokenizer {
    /// Encode `text` by replaying the learned merges on its raw bytes.
    ///
    /// At every step the applicable pair with the LOWEST merge token id is
    /// merged first, i.e. merges are applied in the order they were learned.
    fn encode(&self, text: &str) -> Vec<TokenID> {
        let mut tokens = BpeTokenizer::text_to_tokens(text);
        while tokens.len() >= 2 {
            // Single hash lookup per candidate pair: fetch the merged token
            // together with the pair, instead of `contains_key` followed by
            // two more index lookups.
            let next_merge = tokens
                .windows(2)
                .filter_map(|w| {
                    let pair = (w[0], w[1]);
                    self.merge_record.get(&pair).map(|&token| (pair, token))
                })
                .min_by_key(|&(_, token)| token);

            match next_merge {
                Some((pair, token)) => {
                    tokens = BpeTokenizer::merge_pair_to_token(&tokens, pair, token);
                }
                // No mergeable pair left: encoding is complete.
                None => break,
            }
        }

        tokens
    }

    /// Decode token ids back to text; invalid UTF-8 sequences are replaced
    /// with U+FFFD.
    ///
    /// Panics if an id is not in the vocabulary, i.e. it was not produced by
    /// this tokenizer's `encode`.
    fn decode(&self, token_ids: &[TokenID]) -> String {
        let bytes: Vec<u8> = token_ids
            .iter()
            .flat_map(|id| {
                self.vocab_table
                    .get(id)
                    .expect("unknown token id: not produced by this tokenizer")
                    .iter()
                    .copied()
            })
            .collect();
        String::from_utf8_lossy(&bytes).into_owned()
    }
}

impl BpeTokenizer {
    /// Learn up to `merge_size` merges from `text`.
    ///
    /// Returns a map from a token pair to the new token it merges into.
    /// New token ids start at `u8::MAX + 1` (= 256), so they never collide
    /// with the raw byte tokens produced by `text_to_tokens`.
    fn merge_token(text: &str, merge_size: usize) -> HashMap<(TokenID, TokenID), TokenID> {
        // Convert the text to byte-level tokens
        let mut tokens = BpeTokenizer::text_to_tokens(text);
        let mut merge_records = HashMap::<(TokenID, TokenID), TokenID>::with_capacity(merge_size);

        // Merge loop: each iteration fuses the currently most frequent pair.
        for i in 1..=merge_size {
            if let Some(most_popular_pair) = BpeTokenizer::get_most_popular_pair(&tokens) {
                // Allocate ids above u8::MAX so they are unused by the
                // original byte tokens.
                let new_token = i as TokenID + u8::MAX as TokenID;
                tokens = BpeTokenizer::merge_pair_to_token(&tokens, most_popular_pair, new_token);
                // Record the mapping from `most_popular_pair` to `new_token`
                merge_records.insert(most_popular_pair, new_token);
            } else {
                // Fewer than two tokens remain: nothing left to merge.
                break;
            }
        }

        merge_records
    }

    /// Build the token-id -> byte-sequence table used for decoding.
    ///
    /// `rev_merge_record` must be sorted by new-token id so both children of
    /// a merge are already present when the merged token is processed (the
    /// assert below enforces that invariant).
    fn build_vocab_table(rev_merge_record: &[(TokenID, (TokenID, TokenID))]) -> HashMap<TokenID, Vec<u8>> {
        let mut vocab_table = HashMap::with_capacity(256 + rev_merge_record.len());

        // Base vocabulary: every raw byte decodes to itself.
        for byte in 0..=u8::MAX {
            vocab_table.insert(byte as TokenID, vec![byte]);
        }

        // Merged tokens decode to the concatenation of their children.
        for &(new_token, (t1, t2)) in rev_merge_record {
            assert!(new_token > t1 && new_token > t2);
            let left = vocab_table.get(&t1).expect("child token must precede parent");
            let right = vocab_table.get(&t2).expect("child token must precede parent");
            let mut bytes = Vec::with_capacity(left.len() + right.len());
            bytes.extend_from_slice(left);
            bytes.extend_from_slice(right);
            vocab_table.insert(new_token, bytes);
        }

        vocab_table
    }

    /// Split `text` into its UTF-8 bytes, one token per byte.
    fn text_to_tokens(text: &str) -> Vec<TokenID> {
        text.bytes().map(|b| b as TokenID).collect()
    }

    /// Return the most frequent adjacent token pair, or `None` when `tokens`
    /// has fewer than two elements.
    fn get_most_popular_pair(tokens: &[TokenID]) -> Option<(TokenID, TokenID)> {
        let mut counter = HashMap::<(TokenID, TokenID), usize>::new();
        for pair in tokens.windows(2) {
            *counter.entry((pair[0], pair[1])).or_insert(0) += 1;
        }
        // Break count ties by the numerically smallest pair so training is
        // deterministic regardless of hash-map iteration order.
        counter
            .into_iter()
            .max_by_key(|&(pair, count)| (count, std::cmp::Reverse(pair)))
            .map(|(pair, _)| pair)
    }

    /// Replace every non-overlapping, left-to-right occurrence of `pair` in
    /// `tokens` with `new_token`.
    fn merge_pair_to_token(tokens: &[TokenID], pair: (TokenID, TokenID), new_token: TokenID) -> Vec<TokenID> {
        let mut new_tokens = Vec::with_capacity(tokens.len());
        let mut i = 0;
        while i < tokens.len() {
            // `i + 1 < len` instead of `i < len - 1`: same bound, but cannot
            // underflow on an empty slice.
            if i + 1 < tokens.len() && (tokens[i], tokens[i + 1]) == pair {
                new_tokens.push(new_token);
                i += 2;
            } else {
                new_tokens.push(tokens[i]);
                i += 1;
            }
        }
        new_tokens
    }
}


/// Python module entry point: registers the tokenizer class with the module.
#[pymodule]
pub fn rbpe(module: &Bound<'_, PyModule>) -> PyResult<()> {
    // `add_class` already yields the `PyResult<()>` we need to return.
    module.add_class::<BpeTokenizer>()
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_merge_pair_to_token() {
        let tokens = [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4];
        let new_tokens = BpeTokenizer::merge_pair_to_token(&tokens, (3, 4), 10);
        assert_eq!(
            new_tokens,
            vec![1, 2, 10, 5, 6, 7, 1, 2, 10]
        );
    }

    #[test]
    fn test_get_most_popular_pair() {
        let tokens = [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 3, 4];
        let most_pair = BpeTokenizer::get_most_popular_pair(&tokens);
        assert_eq!(most_pair, Some((3, 4)))
    }

    #[test]
    fn test_get_most_popular_pair_empty() {
        // Fewer than two tokens: no pair exists.
        assert_eq!(BpeTokenizer::get_most_popular_pair(&[]), None);
        assert_eq!(BpeTokenizer::get_most_popular_pair(&[7]), None);
    }

    #[test]
    fn test_train() {
        let tokenizer = BpeTokenizer::train(
            "春风拂面花满枝，流水潺潺鸟语时。山川如画心自远，岁月静好梦相依。",
            100,
        );
        // Each performed merge records exactly one pair; training may stop
        // early if the text runs out of pairs.
        assert!(tokenizer.merge_record.len() <= 100);
        // Vocab = the 256 raw byte tokens plus one entry per merge.
        assert_eq!(
            tokenizer.vocab_table.len(),
            256 + tokenizer.merge_record.len()
        );
    }

    #[test]
    fn test_encode_decode() {
        let tokenizer = BpeTokenizer::train(
            "春风拂面花满枝，流水潺潺鸟语时。山川如画心自远，岁月静好梦相依。",
            5,
        );

        let origin_texts = [
            "Learn about language model tokenization",
            "OpenAI's large language models process text using tokens, which are common sequences of characters found in a set of text. The models learn to understand the statistical relationships between these tokens, and excel at producing the next token in a sequence of tokens",
            "春风拂面花满枝，流水潺潺鸟语时。山川如画心自远，岁月静好梦相依。",
            "「今日も一日頑張りましょう！💪✨🌞」",
        ];

        // Byte-level BPE must round-trip any valid UTF-8 input, including
        // text containing none of the trained merges.
        for origin_text in origin_texts {
            let ids = Tokenize::encode(&tokenizer, origin_text);
            let decoded = Tokenize::decode(&tokenizer, &ids);
            assert_eq!(origin_text, decoded);
        }
    }
}