r'''
根据统计得到的信息做卡方区分度，并按照区分度排序，列出最应该关注的词。

卡方区分度公式：

.. math:: S_{t, c} = \frac{N \times (AD-CB)^2}{(A+C)(B+D)(A+B)(C+D)}

- :math:`N` 为总文档数量

- :math:`A` 为包含词条 :math:`t` 且属于类别 :math:`c` 的文档数量

- :math:`B` 为包含词条 :math:`t` 且不属于类别 :math:`c` 的文档数量

- :math:`C` 为不包含词条 :math:`t` 且属于类别 :math:`c` 的文档数量

- :math:`D` 为不包含词条 :math:`t` 且不属于类别 :math:`c` 的文档数量
'''

import os
import json

from tqdm import tqdm

from count_chi import OUTPUT_DIR as INPUT_DIR

print(f'{INPUT_DIR = }')

# Load the per-label word/document counts produced by count_chi.
chi_path = os.path.join(INPUT_DIR, 'chi.json')
with open(chi_path, 'r', encoding = 'utf-8') as file:
    # INFO[label][word] -> document count for that (label, word) pair.
    INFO: dict[str, dict[str, int]] = json.load(file)

# Grand total of every stored count across all labels and words.
# NOTE(review): this sums per-word document counts (including the
# complement 'no-…' entries), which is generally larger than the number
# of distinct documents, while the chi-square formula's N is the total
# document count — confirm against count_chi's output semantics. Since
# N is a constant factor, the per-label ranking is unaffected either way.
N = sum(sum(per_word.values()) for per_word in INFO.values())
print(f'{N = }')

def getScore(t: str, c: str):
    '''getScore(word, label) — chi-square discrimination score of word `t` for class `c`.'''
    other_labels = INFO.keys() - {c}
    # A/C: documents of class c with / without the word.
    A = INFO.get(c, dict()).get(t, 0)
    C = INFO.get(c, dict()).get(f'no-{t}', 0)
    # B/D: documents of every other class with / without the word.
    B = sum(INFO.get(label, dict()).get(t, 0) for label in other_labels)
    D = sum(INFO.get(label, dict()).get(f'no-{t}', 0) for label in other_labels)
    denominator = (A + C) * (B + D) * (A + B) * (C + D)
    # A zero denominator implies A*D - C*B == 0 as well, so clamping to 1
    # merely avoids ZeroDivisionError and still yields the correct 0 score.
    return N * (A * D - C * B) ** 2 / max(denominator, 1)

def main():
    '''Score every word for every label and dump ranked results, one JSON line each.'''
    for label in INFO:
        # Skip the complement counters; keep this lazy so tqdm behaves the
        # same as with the original filter (no known total).
        candidates = (w for w in INFO.get(label).keys() if not w.startswith('no-'))
        scored = [(w, getScore(w, label)) for w in tqdm(candidates, label)]
        # Highest chi-square score first (stable for ties).
        scored.sort(key = lambda pair: pair[1], reverse = True)
        out_path = os.path.join(INPUT_DIR, f'{label}-words.txt')
        with open(out_path, 'w', encoding = 'utf-8') as file:
            for word, score in tqdm(scored, label):
                record = json.dumps({
                    'word': word, 'score': score,
                }, ensure_ascii = False)
                file.write(record + '\n')

# Run as a script: compute scores and write the ranked word list per label.
if __name__ == '__main__':
    main()
