//! @Author: DengLibin
//! @Date: Create in 2023-12-19 11:13:03
//! @Description: 朴素贝叶斯
//! P(A|B) = P(A,B)/P(B)  B发生的前提下A发生的概率 = A,B同时发生的概率 / B发生的概率
//! P(B|A) = P(A,B)/P(A)
//! => P(B|A) = [P(A|B)P(B)]/P(A)
//! P(W1,W2) = P(W1) * P(W2)
//! 使用朴素贝叶斯进行文本分类
//! 步骤：
//! 1.加载训练数据
//! 2.每行数据进行分词，并统计词频 TF = 单词出现的次数 / 文档中的总单词数
//! 3.统计所有分类下的词频（汇总）
//! 4.统计单词的逆向文档频率 IDF = log(文档总数 / (该单词出现的文档数 + 1))

use std::collections::{HashMap, HashSet};
use std::sync::Arc;

use crate::ml::jieba;
use jieba_rs::Jieba;
use log::{error, info};

use crate::global::{self, GlobalResult};

/// @Author: DengLibin
/// @Date: Create in 2023-12-19 15:16:50
/// @Description: Naive Bayes demo: loads the training data, tokenizes each
/// line (dropping stop words), computes per-document term frequencies and
/// corpus-wide IDF values, then derives a TF-IDF weight per word per document.
///
/// Returns `Ok(())` on success, or the error propagated from reading either
/// data file.
pub async fn run(jieba: Arc<Jieba>) -> GlobalResult<()> {
    // Data set: one TextCat per training line.
    let mut data_set: Vec<TextCat> = Vec::new();
    // Load the training data.
    let v = read_data("./data_set/beyes/toutiao_cat_demo.txt").await?;
    // Load the stop words.
    let stop_words = read_data("./data_set/beyes/stop.txt").await?;

    // Collect stop words into a set for O(1) membership tests during parsing.
    let stop_words_set: HashSet<String> = stop_words.into_iter().collect();
    println!("停用词大小:{}", stop_words_set.len());

    for line in v {
        let text_cat = parse2textcat(jieba.clone(), line, &stop_words_set).await;
        data_set.push(text_cat);
    }
    drop(stop_words_set);

    info!("训练数据量:{}", data_set.len());
    // word -> number of documents containing the word (document frequency).
    let mut idf_map_count: HashMap<String, usize> = HashMap::new();
    // word -> IDF value.
    let mut idf_map: HashMap<String, f32> = HashMap::new();
    // Total number of documents.
    let doc_count = data_set.len();
    // Count document frequency.
    // BUG FIX: the previous version rescanned every document for every word
    // occurrence, so a word appearing in d documents was counted d*d times
    // (and the pass was O(docs^2 * words)). Since `word_count` keys are unique
    // within one document, incrementing once per (document, word) pair yields
    // the correct document frequency in a single pass.
    for row in data_set.iter() {
        // Per-document word frequencies.
        let word_count_map = &row.word_count;
        println!("----------------------------词频:{}---------------------------------", row.cat);
        for kv in word_count_map {
            print!("{}={}; ", kv.0, kv.1);
            *idf_map_count.entry(kv.0.clone()).or_insert(0_usize) += 1;
        }
        println!();
    }
    // IDF = log2(doc_count / (doc_freq + 1)); the +1 smoothing avoids a zero
    // denominator for unseen words.
    for (word, doc_freq) in idf_map_count.iter() {
        let idf = (doc_count as f32 / (doc_freq + 1) as f32).log2();
        idf_map.insert(word.clone(), idf);
    }

    // Compute the TF-IDF weight of every word in every document.
    for row in data_set.iter_mut() {
        // Per-document word frequencies (disjoint field borrow from
        // `word_df_idf`, mutated below).
        let word_count_map = &row.word_count;
        for kv in word_count_map {
            let idf_o = idf_map.get(kv.0);
            // Term frequency: occurrences / total words in this document.
            let tf = (*kv.1 as f32) / row.word_total as f32;
            match idf_o {
                Option::Some(idf) => {
                    let tf_idf = tf * idf;
                    row.word_df_idf.insert(kv.0.clone(), tf_idf);
                    println!("df-idf值:{}={}", kv.0, tf_idf)
                }
                Option::None => {
                    // Every word was inserted into idf_map above, so this
                    // branch indicates an internal inconsistency.
                    error!("idf值不存在:{}", kv.0)
                }
            }
        }
    }
    Ok(())
}

/// @Author: DengLibin
/// @Date: Create in 2023-12-19 15:18:18
/// @Description: Read the file at `data_path` and return its lines in order.
/// Errors from the underlying reader are converted via `global::check_result`.
async fn read_data(data_path: &str) -> GlobalResult<Vec<String>> {
    let mut lines: Vec<String> = Vec::new();
    // Stream the file line by line, accumulating each line into the vector.
    let read_result = rust_common::tokio_file::read_by_line(data_path, |line| lines.push(line)).await;
    global::check_result(read_result)?;
    Ok(lines)
}

/// @Author: DengLibin
/// @Date: Create in 2023-12-19 16:04:56
/// @Description: Parse one "_!_"-separated training line into a `TextCat`.
/// Field 2 is the category name; field 3 is the raw text, segmented with
/// jieba and filtered against `stop_words_set`; field 4 is a comma-separated
/// keyword list appended as-is. Word frequencies and the total word count are
/// filled in; `word_df_idf` is left empty for the caller to populate.
async fn parse2textcat(jieba: Arc<Jieba>, line: String, stop_words_set: &HashSet<String>) -> TextCat {
    let mut text_cat = TextCat {
        cat: "".into(),
        word_count: HashMap::new(),
        word_total: 0_usize,
        word_df_idf: HashMap::new(),
    };
    // All words of this document, borrowed from `line`.
    let mut line_words: Vec<&str> = Vec::new();
    for (i, item) in line.split("_!_").enumerate() {
        match i {
            2 => {
                // Category name.
                text_cat.cat = item.into();
            }
            3 => {
                // Text content: segment, then drop stop words.
                let words = jieba::cut(jieba.clone(), item);
                for word in words {
                    if stop_words_set.contains(word) {
                        continue;
                    }
                    line_words.push(word);
                }
            }
            4 => {
                // Comma-separated keywords shipped with the data set.
                line_words.extend(item.split(','));
            }
            _ => {}
        }
    }

    // BUG FIX: `word_total` is the TF denominator and must equal the total
    // number of words in the document. The old code added the *running*
    // per-word count on every occurrence (a word seen k times contributed
    // 1 + 2 + ... + k), inflating the total and skewing every TF value.
    text_cat.word_total = line_words.len();
    // Count occurrences per word.
    for word in line_words {
        *text_cat.word_count.entry(word.into()).or_insert(0_usize) += 1;
    }

    text_cat
}

/// One training document: its category label plus per-word statistics used
/// for TF-IDF computation.
struct TextCat {
    cat: String,                        // category label
    word_count: HashMap<String, usize>, // occurrences per word (TF numerator)
    word_total: usize,                  // total word count (TF denominator)
    word_df_idf: HashMap<String, f32>,  // TF-IDF weight per word
}
