import time

import json
from tqdm import tqdm
from glob import glob
from collections import Counter
import os
from fish_tool import logs
import random


def iter_tnews(num=None, is_shuffle=False):
    """Iterate over raw THUCNews article texts.

    Args:
        num: maximum number of files to read; None means all files.
        is_shuffle: shuffle file order before truncating to *num*.

    Returns:
        (total, data): the number of selected files and a lazy generator
        yielding each file's full text.
    """
    logs.tmp.info(f'iter_tnews...')
    ptn = 'E:/code/data/common/THUCNews数据集/THUCNews/*/*'
    paths = glob(ptn)
    if is_shuffle:
        random.shuffle(paths)
    paths = paths[:num]  # paths[:None] keeps the whole list
    total = len(paths)

    def read_texts(selected):
        # Close each file promptly — the original bare-generator version
        # (`open(path).read() for path in paths`) leaked handles until GC.
        for path in selected:
            with open(path, encoding='utf8') as f:
                yield f.read()

    return total, read_texts(paths)


def count_tnews_char():
    """Count character frequencies over the THUCNews corpus and persist
    the counts to data/tnews_char__count.json.

    The counter is checkpointed to disk every ~10000 documents and once
    more after the loop finishes.
    """
    os.makedirs('data', exist_ok=True)
    count_path = 'data/tnews_char__count.json'
    char__count = Counter()
    total, data = iter_tnews()
    for i, txt in enumerate(tqdm(data, total=total)):
        char__count.update(txt)
        if i % 10000 == 1:
            logs.tmp.info(f'save i = {i}')
            with open(count_path, 'w', encoding='utf8') as f:
                json.dump(dict(char__count), f, ensure_ascii=False, indent=2)
    # Unconditional final save (consistent with count_novel_char /
    # count_weibo_char).  The old `i == total - 1` in-loop condition never
    # fired for an empty corpus, leaving no output file at all.
    with open(count_path, 'w', encoding='utf8') as f:
        json.dump(dict(char__count), f, ensure_ascii=False, indent=2)


def iter_novel(num=None, is_shuffle=False):
    """Iterate over novel chapter texts scraped from Qidian.

    Args:
        num: maximum number of chapter files to read; None means all.
        is_shuffle: shuffle file order before truncating to *num*.

    Returns:
        (total, data): number of selected files and a lazy generator that
        yields each chapter's content string, or None when the expected
        Data/Content keys are missing from the JSON document.
    """
    logs.tmp.info(f'iter_novel...')
    ptn = 'D:/spider/qidian_app_spider/data/novel_content/*/chapter/*.json'
    paths = glob(ptn)
    if is_shuffle:
        random.shuffle(paths)
    paths = paths[:num]  # paths[:None] keeps the whole list
    total = len(paths)

    def out(paths):
        for path in paths:
            # Close each chapter file promptly — the original
            # `json.load(open(path))` leaked the handle until GC.
            with open(path, encoding='utf8') as f:
                doc = json.load(f)
            if 'Data' in doc and 'Content' in doc['Data']:
                yield doc['Data']['Content']
            else:
                yield None

    data = out(paths)
    return total, data


def count_novel_char():
    """Accumulate character frequencies over the novel corpus and write
    them to data/novel_char__count.json, checkpointing every ~10000
    chapters and once more at the end."""
    os.makedirs('data', exist_ok=True)
    save_path = 'data/novel_char__count.json'
    char__count = Counter()

    def dump_counts():
        # Rewrite the whole snapshot each time; dict() strips the Counter type.
        with open(save_path, 'w', encoding='utf8') as f:
            json.dump(dict(char__count), f, ensure_ascii=False, indent=2)

    total, data = iter_novel(num=None)
    for i, txt in enumerate(tqdm(data, total=total)):
        if txt:  # skip chapters whose JSON lacked a Content field
            char__count.update(txt)
        if i % 10000 == 1:
            logs.tmp.info(f'save i = {i}')
            dump_counts()
    dump_counts()


def iter_weibo(num=None, is_shuffle=False):
    """Iterate over post texts from the 5M-weibo corpus.

    Args:
        num: maximum number of lines to read; None means the whole file.
        is_shuffle: accepted for interface parity with the other
            iterators but unused (the corpus is a single large file).

    Returns:
        (num, data): the requested count (may be None; tqdm accepts
        total=None) and a lazy generator of post texts.  The generator
        yields '' for the first line and for lines it cannot parse.
    """
    logs.tmp.info(f'iter_weibo...')

    def clear(line):
        # Column 18 presumably holds the post text — TODO confirm against
        # the corpus schema.
        if line:
            ps = line.split('\t')
            if len(ps) > 19:
                return ps[18]
        return ''

    def out(num):
        path = 'E:/code/data/common/500万微博语料/weibo.txt'
        # `with` closes the corpus file when iteration ends or is abandoned
        # (the original left it open until GC).
        with open(path, encoding='utf8') as f:
            for i, line in enumerate(f):
                # First line is presumably a header row — yield '' for it.
                yield '' if i == 0 else clear(line)
                # Bug fix: the original `if i >= num` raised TypeError when
                # num is None — which is exactly what count_weibo_char passes.
                if num is not None and i + 1 >= num:
                    break

    data = out(num)
    return num, data


def count_weibo_char():
    """Accumulate character frequencies over the weibo corpus and write
    them to data/weibo_char__count.json, checkpointing every ~10000
    posts and once more at the end."""
    os.makedirs('data', exist_ok=True)
    count_path = 'data/weibo_char__count.json'
    char__count = Counter()

    def dump_counts():
        # Rewrite the whole snapshot each time; dict() strips the Counter type.
        with open(count_path, 'w', encoding='utf8') as fw:
            json.dump(dict(char__count), fw, ensure_ascii=False, indent=2)

    total, data = iter_weibo(num=None)
    for i, txt in enumerate(tqdm(data, total=total)):
        if txt:  # skip header/unparseable lines, which arrive as ''
            char__count.update(txt)
        if i % 10000 == 1:
            logs.tmp.info(f'save i = {i}')
            dump_counts()
    dump_counts()


def add_dict(a, b):
    """Return a new dict whose values are the per-key sums of *a* and *b*.

    Neither input is mutated; keys missing from one side contribute their
    value from the other side unchanged (treated as adding 0).
    """
    merged = dict(a)
    for key, value in b.items():
        merged[key] = merged.get(key, 0) + value
    return merged


def merge_char():
    """Merge the three per-corpus character-count JSON files into a
    single data/merge_char__count.json.

    Reads the weibo, tnews and novel count files (in that order, which
    also fixes the key order of the merged dict) and sums counts per
    character via add_dict.
    """
    sources = [
        'data/weibo_char__count.json',
        'data/tnews_char__count.json',
        'data/novel_char__count.json',
    ]
    out = {}
    for path in sources:
        # Close each input file explicitly — the original
        # `json.load(open(path))` leaked three handles until GC.
        with open(path, encoding='utf8') as f:
            out = add_dict(out, json.load(f))
    path = 'data/merge_char__count.json'
    with open(path, 'w', encoding='utf8') as fw:
        json.dump(out, fw, ensure_ascii=False, indent=2)


def sort_merge():
    """Split the merged character counts into Chinese and non-Chinese
    groups and write each group to a text file, one `char\\t\\tcount`
    line per character, sorted by descending frequency."""
    path = 'data/merge_char__count.json'
    with open(path, encoding='utf8') as f:
        char__count = json.load(f)

    cn__count = {}
    non_cn__count = {}
    for char, count in char__count.items():
        if check_is_cn_char(char):
            cn__count[char] = count
        else:
            non_cn__count[char] = count

    def _dump_sorted(save_path, counts):
        # `with` flushes and closes the writer — the original opened two
        # files into the same variable and never closed either of them.
        with open(save_path, 'w', encoding='utf8') as fw:
            for char, count in sorted(counts.items(), key=lambda x: -x[1]):
                fw.write(f'{char}\t\t{count}\n')

    _dump_sorted('data/sort_中文.txt', cn__count)
    _dump_sorted('data/sort_非中文.txt', non_cn__count)


def check_is_cn_char(char):
    """Return True when *char* lies in the CJK Unified Ideographs block
    (U+4E00..U+9FFF), i.e. is a common Chinese character."""
    lower, upper = '\u4e00', '\u9fff'
    return lower <= char and char <= upper


if __name__ == '__main__':
    # Uncomment the desired pipeline step.  Intended order:
    # per-corpus counting (count_tnews_char / count_novel_char /
    # count_weibo_char) -> merge_char -> sort_merge.
    # count_novel_char()
    # merge_char()
    # sort_merge()
    pass
