# -*- coding: utf-8 -*-
"""
# 使用了 [jieba==0.42.1]，遵循其 [MIT] 许可证，原始代码来源：[https://github.com/fxsjy/jieba]
# 使用了 [pandas==2.0.3]，遵循其 [BSD 3-Clause License] 许可证，原始代码来源：[https://pandas.pydata.org]
# 使用了 [scipy==1.10.1]，遵循其 [BSD License] 许可证，原始代码来源：[https://scipy.org/]
# 使用了 [tqdm==4.67.1]，遵循其 [MIT] 许可证，原始代码来源：[https://tqdm.github.io]
"""
import os,sys
from pathlib import Path
from typing import Any,Dict,List,Tuple
import jieba.analyse as analyse
import pandas as pd
from scipy.sparse import csr_matrix
from multiprocessing import Pool
from tqdm import tqdm
class PARA_CONFIG:
    """Configuration constants for the TF-IDF pipeline (paths, thresholds, POS filter)."""
    # Directory containing this script, resolved to an absolute path.
    CURRENT_DIR = Path(__file__).parent.resolve()
    MD_DIR = CURRENT_DIR.parent / "md_files"  # Markdown input directory; change if needed.
    STOPWORDS_PATH = CURRENT_DIR.parent / "docs" / "stopwords.txt"  # Stopword list fed to jieba; change if needed.
    # Keep only keywords that appear (non-zero weight) in at least this many documents.
    APPEARTIMES: int = 5
    N_KEYWORDS: int = 20  # Number of keywords extracted per paper.
    # Multiplier applied to normalized TF-IDF weights so values are large integers-ish.
    SCALE_RATE: float = 100_000_000.0
    # POS tags passed to jieba's allowPOS filter; change the word types if needed.
    # NOTE(review): 'nw', 'nv', 'av', 'dv' look like Baidu-LAC tags rather than
    # standard jieba/ICTCLAS tags -- confirm they actually match anything.
    ANALYSIS_KEYS: List[str] = ['n','nr','ns','nt','nw','nv','v','vn','an','av','dv']
    # Location of the project-local helper package (f_basic) added to sys.path below.
    FUNC_PATH=CURRENT_DIR.parent.parent.resolve()/"about_file"
# Make the sibling "about_file" directory importable as a top-level module.
sys.path.append(str(PARA_CONFIG.FUNC_PATH))
import f_basic  # project-local helpers: Timer decorator, Config.PAT_CLEAN regex, save_dataframe
def get_tfidf(text_in: str = "") -> Tuple[List[str], List[float]]:
    """Extract the top-N TF-IDF keywords of *text_in* with rescaled weights.

    Weights are normalized so they sum to PARA_CONFIG.SCALE_RATE, which keeps
    the numbers in a comparable range across documents of different lengths.

    Args:
        text_in: raw document text (already cleaned by the caller).

    Returns:
        (keywords, weights) as two parallel lists; both empty when jieba
        extracts no tags (e.g. empty or stopword-only input).
    """
    tags = analyse.extract_tags(
        sentence=text_in,
        topK=PARA_CONFIG.N_KEYWORDS,
        withWeight=True,
        allowPOS=PARA_CONFIG.ANALYSIS_KEYS,
    )
    total_weight = sum(w for _, w in tags)
    # Guard: empty/unparseable text yields no tags, making total_weight 0 --
    # the original code divided by it and raised ZeroDivisionError.
    if total_weight <= 0:
        return [], []
    scale_factor = PARA_CONFIG.SCALE_RATE / total_weight
    pairs = [(k, w * scale_factor) for k, w in tags if k and pd.notna(k)]
    if not pairs:
        return [], []
    keys, weights = zip(*pairs)
    # Return lists (not tuples) to match the annotated return type.
    return list(keys), list(weights)
def process_file(fp: Path) -> Dict[str, Any]:
    """Turn one markdown file into a record: {'title': stem, keyword: weight, ...}."""
    with fp.open('r', encoding='utf-8', errors='replace') as fh:
        # Collapse the document onto one line, skipping blank lines.
        compact = ''.join(chunk.strip() for chunk in fh if chunk.strip())
    cleaned = f_basic.Config.PAT_CLEAN.sub("", compact)
    keywords, weights = get_tfidf(cleaned)
    record: Dict[str, Any] = {'title': fp.stem}
    record.update(zip(keywords, weights))
    return record
@f_basic.Timer
def do_tfidf(max_workers=None) -> Tuple[csr_matrix, List[str], List[str]]:
    """Run TF-IDF keyword extraction over every .md file under MD_DIR.

    Args:
        max_workers: upper bound on worker processes. Defaults to
            2 * os.cpu_count(); os.cpu_count() may return None, so we
            fall back to 1 (the original default expression would have
            raised TypeError at import time in that case).

    Returns:
        Whatever f_basic.save_dataframe returns for the filtered frame
        (per the annotation: a sparse matrix plus row/column labels).

    Raises:
        FileNotFoundError: when MD_DIR contains no .md files (previously
            this surfaced as an opaque ValueError from Pool(0)).
    """
    analyse.set_stop_words(str(PARA_CONFIG.STOPWORDS_PATH))
    md_files = list(Path(PARA_CONFIG.MD_DIR).rglob("*.md"))
    if not md_files:
        raise FileNotFoundError(f"No .md files found under {PARA_CONFIG.MD_DIR}")
    if max_workers is None:
        max_workers = (os.cpu_count() or 1) * 2
    n_procs = max(1, min(max_workers, len(md_files)))
    with Pool(n_procs) as pool:
        results = list(tqdm(pool.imap(process_file, md_files),
                            total=len(md_files), desc="Processing"))
    # Drop records with no usable title (defensive; process_file always sets one).
    valid_results = [r for r in results if r and r.get('title')]
    df = pd.DataFrame(valid_results).fillna(0.0)
    # Keep only keyword columns with a non-zero weight in at least
    # APPEARTIMES documents. (The former astype(float, errors='ignore')
    # was a no-op: keyword columns are already float, 'title' is str, and
    # errors='ignore' is deprecated in pandas 2.)
    mask = df.ne(0.0).sum().ge(PARA_CONFIG.APPEARTIMES)
    mask['title'] = True  # never drop the identifier column, even for tiny corpora
    return f_basic.save_dataframe(df.loc[:, mask], prefix="1_ICT_TFIDF")

if __name__=="__main__":
    # Script entry point: run the whole TF-IDF pipeline with default settings.
    do_tfidf()