'''
统计卡方区分度的必要信息。
Collect the per-class word presence/absence counts needed to compute
chi-square (χ²) discriminative power for each word.
'''

import os
import json
import jieba
import operator

from tqdm import tqdm
from functools import reduce
from collections import defaultdict

# Input corpus: one JSON object per line; overridable via the INPUT_FILE env var.
INPUT_FILE = os.getenv('INPUT_FILE') or 'data/sample-5000.jsonl'
# Results are written to a sibling directory named after the input file with its
# extension dropped. os.path.splitext replaces the hand-rolled
# '.'.join(basename.split('.')[: -1]), which returned an empty name for
# extensionless filenames and silently collapsed OUTPUT_DIR into the parent dir.
OUTPUT_DIR = os.path.join(
    os.path.dirname(INPUT_FILE),
    os.path.splitext(os.path.basename(INPUT_FILE))[0],
)
os.makedirs(OUTPUT_DIR, exist_ok = True)

# Coarse topic classes mapped to the raw LDA topic ids they aggregate.
# NOTE(review): topic id '14' appears under both '典礼活动' and '无关'; because
# later entries win when TOPIC_DICT is built, '14' resolves to '无关'. Confirm
# whether one of the two occurrences should be a different id.
TOPIC_CLASSES = {
    '高考招生': ['1', '3'],
    '科技创新': ['4', '5'],
    '精神风貌': ['9', '11'],
    '人才建设': ['6', '10'],  # includes faculty topics — or rename to "人才建设"?
    '典礼活动': ['14'],
    '公共卫生': ['16'],
    '无关': ['0', '2', '7', '8', '12', '14', '15'],
}

# Inverted index: raw topic id -> coarse class name. A flat dict comprehension
# replaces the previous reduce(operator.add, ...) list concatenation, which was
# quadratic; duplicate ids still resolve to the class listed last, as before.
TOPIC_DICT: dict[str, str] = {
    topicId: className
    for className, topicIds in TOPIC_CLASSES.items()
    for topicId in topicIds
}

def getTopicClass(topicDict: dict[str, float]) -> str:
    '''Collapse per-topic LDA weights onto coarse classes and return the
    class with the largest accumulated weight.

    Topic ids missing from TOPIC_DICT are pooled under the 'null' class.
    Raises ValueError when *topicDict* is empty (max over no candidates).
    '''
    classScores: defaultdict[str, float] = defaultdict(float)
    for topicId, weight in topicDict.items():
        className = TOPIC_DICT.get(topicId, 'null')
        classScores[className] += weight
    return max(classScores, key = classScores.__getitem__)

def getClass(obj: dict[str, str | dict]):
    # date = obj.get('发表时间')[: len('xxxx-xx-xx')]
    # year = date[: len('xxxx')]
    # if year == '2021':
    #     dateBegin = '07-01'
    #     dateEnd = '09-01'
    # else:
    #     dateBegin = '06-01'
    #     dateEnd = '09-01'
    # if not (dateBegin <= date[-len('xx-xx'): ] < dateEnd):
    #     return 'null'
    # else:
    #     return year + '-' + obj.get('predict')
    return str(obj.get('predict'))
    return getTopicClass(obj.get('lda-topics')) + '-' + obj.get('predict')

def yieldFile(filePath: str, onlyWords: bool = False):
    '''Stream (label, wordSet) pairs from a JSONL corpus file.

    Each line is parsed as JSON; its `content` field is segmented with jieba
    and lower-cased into a set of words. When *onlyWords* is true the label is
    None (skips the classification step for the vocabulary-building pass);
    otherwise the label comes from getClass(). Progress is shown via tqdm.
    '''
    with open(filePath, 'r', encoding = 'utf-8') as file:
        for rawLine in tqdm(file):
            record = json.loads(rawLine)
            if onlyWords:
                label = None
            else:
                label = getClass(record)
            tokens = jieba.cut(record.get('content'))
            wordSet = {token.strip().lower() for token in tokens}
            yield label, wordSet

def main():
    '''Two-pass corpus scan: build the vocabulary, then count per-class word
    presence/absence and dump the table to `<OUTPUT_DIR>/chi.json`.
    '''
    print(f'{TOPIC_DICT = }')
    # Pass 1: union of every document's word set -> full vocabulary.
    allWords: set[str] = set()
    for _, words in yieldFile(INPUT_FILE, onlyWords = True):
        allWords |= words
    print(f'totally {len(allWords)} words')
    # infoDict[label][word]        = number of docs with that label containing word
    # infoDict[label][f'no-{word}'] = number of docs with that label missing word
    infoDict: defaultdict[str, defaultdict[str, int]] = defaultdict(
        lambda: defaultdict(int)
    )
    # Pass 2: per-class presence and absence counts for every vocabulary word.
    for label, words in yieldFile(INPUT_FILE):
        for presentWord in words:
            infoDict[label][presentWord] += 1
        for absentWord in allWords - words:
            infoDict[label][f'no-{absentWord}'] += 1
    outputPath = os.path.join(OUTPUT_DIR, 'chi.json')
    with open(outputPath, 'w', encoding = 'utf-8') as file:
        json.dump(infoDict, file, ensure_ascii = False)

# Script entry point: build the chi-square count table for INPUT_FILE.
if __name__ == '__main__':
    main()
