'''
3090上获取数据:

```{bash}
cd /data/private/yanruotian/cpm-ant-plus/summary
python3 sample.py -i /data/private/yanruotian/cpm-ant-plus/cpm-live/examples/outputs/jsonl/cpm-ant-plus-10b/bs\=4/lr\=2e-3/seed\=100/predicts -o 抖音-2022-sample.jsonl
```
'''

import os
import json
import string
import wordcloud

from math import sqrt
from typing import TypeVar, Callable, Iterable
from count_chi import TOPIC_CLASSES
from itertools import product
from collections import defaultdict

from count_chi import OUTPUT_DIR as INPUT_DIR

# Generic element type used by myFilter below.
U = TypeVar('U')

print(f'input dir = {INPUT_DIR}')
# Word-cloud PNGs are written into a 'pngs' subdirectory of the input dir.
OUTPUT_DIR = os.path.join(INPUT_DIR, 'pngs')
os.makedirs(OUTPUT_DIR, exist_ok = True)

FILE_PATH = os.path.abspath(__file__)
# Stop-word list is expected to sit next to this script as 'stop-words.txt'.
DEFAULT_STOP_FILE = os.path.join(os.path.dirname(FILE_PATH), 'stop-words.txt')

# Lazily-populated module cache: None until the first getStopWords() call.
_stopWords: set[str] | None = None
def getStopWords():
    """Return the stop-word set, reading DEFAULT_STOP_FILE once on first use.

    Every line of the file is stripped and lower-cased before being added.
    """
    global _stopWords
    if _stopWords is None:
        _stopWords = set()
        for stopFilePath in {DEFAULT_STOP_FILE}:
            with open(stopFilePath, 'r', encoding = 'utf-8') as stopFile:
                for rawWord in stopFile:
                    _stopWords.add(rawWord.strip().lower())
    return _stopWords

def yieldWords(file: Iterable[str]):
    """Parse each line as JSON and yield its 'word' value.

    Lines whose JSON object lacks a non-null 'word' key are reported to
    stdout and skipped.
    """
    for rawLine in file:
        word = json.loads(rawLine).get('word')
        if word is None:
            print(f'line "{rawLine}" not a json with "word"')
        else:
            yield word

# TypeVar local to this block so it is self-contained; equivalent to U above.
_FilterItem = TypeVar('_FilterItem')

def myFilter(
    function: Callable[[_FilterItem], bool],
    iterable: Iterable[_FilterItem],
    maxCount: int = -1,
):
    """Like built-in filter(), but yield at most maxCount matching items.

    A negative maxCount (the default) means "no limit". BUG FIX: the old
    version checked the limit only *after* yielding, so maxCount = 0
    incorrectly produced one item instead of none.
    """
    if maxCount == 0:
        return
    count = 0
    for item in filter(function, iterable):
        yield item
        count += 1
        # Stop right after the maxCount-th match, without pulling another
        # item from the (possibly lazy) source iterable.
        if 0 <= maxCount <= count:
            break

def drawFile(labels: set[str]):
    """Render one word-cloud PNG aggregated over the given set of labels.

    For each label, word frequencies are read from INPUT_DIR/info-<label>.json
    and candidate words from INPUT_DIR/<label>-words.txt; the resulting image
    is written to OUTPUT_DIR. Prints a message and returns early if any info
    json file is missing.
    """
    # Aggregate word -> count across all requested labels.
    wordDict = defaultdict(int)
    for label in labels:
        infoPath = os.path.join(INPUT_DIR, f'info-{label}.json')
        if not os.path.isfile(infoPath):
            print(
                f'err drawing labels {labels}:\n'
                f'    info json path "{infoPath}" not a file'
            )
            return
        with open(infoPath, 'r', encoding = 'utf-8') as file:
            _wordDict: dict[str, int] = json.load(file)
            for word, count in _wordDict.items():
                wordDict[word] += count
    wc = wordcloud.WordCloud(
        # r'C:\windows\Fonts\simsun.ttc',  # simsun.ttc, simkai.ttf, simhei.ttf
        '/mnt/c/windows/Fonts/simsun.ttc',
        stopwords = wordcloud.STOPWORDS | {
            '转发', '微博', '视频', '回复', '', '超话',
            '#', '@', r'[', r']', '/', '?', '...', '##',
            '…', '：', ':', '~', '.', '一个', '【', '】',
            '清华', '清华大学', '清华北大', '北大',
            '哈哈哈', '哈哈', '<', '>', r'(', r')',
            '年', '说', '没有', '（', '）',
            '收藏', '网友', '点赞', '点击',
            '劳东燕', '河南', '银行', '储户',  # professor Lao Dongyan's comments on the Henan bank incident
            '啊啊啊', '是不是', '有没有',
            '奶气', '奶气来', '皮卡丘', '侦探',  # college-admission related
            '已经', '进入', '一起',
            # BUG FIX: a missing comma after '从未' used to merge it with
            # '允悲' (implicit string concatenation) into the single useless
            # stop word '从未允悲', so neither intended word was filtered.
            '称其以', '全文', '时间', '低过', '这份', '从未',  # 2019
            '允悲', '北大清华', '怪不得', '比较', '开玩笑',  # 2020
            '今天', '这场', '这条', '一次',  # 2020
            '发现', '一圈', '洗洗', '主页', '转过', '没人', # 2021
            '一下', '分享', '一丝丝', '真的',  # 2021
            '龙子', '发了', '两名', '中说', '不出', '偏少',  # 2022
            '看到', '需要', '一定', '获得', '专属', '红包',  # 2022
            '起来', '新鲜', '新年礼物', '送给', '好事', '新年',  # 2022
        } | getStopWords(),
        background_color = 'white',
        scale = 20,
        max_font_size = 25,
        width = 200,
        height = 200,
    )
    # Words made up entirely of these characters are considered noise.
    stopChars = set(string.digits + string.ascii_letters + '.')
    usedWords: list[str] = []
    for label in labels:
        with open(os.path.join(INPUT_DIR, f'{label}-words.txt'), 'r', encoding = 'utf-8') as file:
            usedWords.extend(myFilter(
                lambda word: (
                    # None check must come first: len(None) would raise.
                    word is not None
                    and len(word) >= 2
                    and word not in wc.stopwords
                    and not all(char in stopChars for char in word)
                ),
                yieldWords(file),
                maxCount = 100,
            ))
    print(f'words for labels {labels}: {usedWords}')
    # sqrt squashes the frequency range. BUG FIX: default to 0 — dict.get on
    # a defaultdict does NOT invoke the default factory, so a word present in
    # <label>-words.txt but absent from the info json used to return None and
    # crash sqrt(None + 2).
    wc.generate_from_frequencies({
        usedWord: int(sqrt(wordDict.get(usedWord, 0) + 2)) for usedWord in usedWords
    })  # frequencies are pre-filtered above because generate_from_frequencies ignores the stopwords setting
    wc.to_file(os.path.join(OUTPUT_DIR, f'词云图-{",".join(sorted(labels))}.png'))

def main():
    """Render one word cloud per sentiment class label (0 / 1 / 2).

    NOTE: earlier runs also grouped by topic (TOPIC_CLASSES x sentiment via
    product) and by year (2019-2022 x sentiment); rebuild `classes` with
    those products to restore that behavior.
    """
    classes = {'0', '1', '2'}
    for sentimentLabel in classes:
        drawFile({sentimentLabel})

if __name__ == '__main__':
    main()
