# encoding=utf8
import json
import re
import math
import codecs
from pymongo import MongoClient
from bson import json_util
from urllib.parse import quote_plus
from tqdm import tqdm

# Closed set of category labels produced by the 'ArticleClassify' tagging task.
# NOTE(review): both '社会科学' and '社科' appear — presumably historical label
# variants; confirm whether they should be merged downstream.
category = ['财务', '宠物', '法律', '健康', '教育', '科技', '旅行', '母婴', '情感', '社会科学', '社科',
            '时尚', '数码', '体育', '心理学', '艺术', '影视', '娱乐', '职场']
# Per-category tally of exported rows, filled in by the main loop below.
cate_dict = {cate: 0 for cate in category}
# Rows whose checkData['cate'] is not in `category`.
other_count = 0


def clean(text):
    """Normalize a Chinese article body for export.

    Collapses whitespace / HTML-ish tags / ASCII commas into full-width
    commas, de-duplicates punctuation runs, strips emoji, @-mentions,
    hashtag spans, URLs, leftover literal JSON escape sequences and the
    boilerplate "转发微博" marker, then trims the result.

    Args:
        text: raw article content string.

    Returns:
        The cleaned string; may be empty.
    """
    # Whitespace, tags and ',' -> '，'; collapse '，' runs; then fix the
    # '。，'/'，。' and '？，'/'，？' sequences those substitutions create.
    # (Raw string fixes the invalid '\s' escape in the original literal.)
    text = re.sub('？，|，？', '？', re.sub('。，|，。', '。', re.sub('，+', '，', re.sub(r'\s+|</?(.+?)>|,', '，', text.strip()))))
    text = re.sub(r"@", ',', text)
    text = re.sub(r'↓+', '', text)
    # Collapse runs of full-width exclamation/question marks.
    text = re.sub(r'！+', '！', text)
    text = re.sub(r'？+', '？', text)
    # NOTE(review): '\\u2026' etc. match the *literal* text "\u2026"
    # (un-decoded JSON escapes), not the actual '…' character — presumably
    # intentional for raw-dumped content; confirm against the data.
    text = re.sub(r'\\u2026+|\\u203C|\\u2014', '', text)
    text = re.sub(r'\\n|➕+', '，', text)  # literal "\n" sequences and '➕' runs -> '，'
    # Drop common emoji and stray backslashes.
    text = re.sub(r'👀+|👉+|\\|🍺+|🙏🏾+|🙏+|👖+|🌟+|😭+|😂+|☀+|💛+|❤+|😄+|🤣+|😓+|💪+|😆+|🐶+|😍', '', text)
    text = re.sub(r"(回复)?(//)?\s*@\S*?\s*(:| |$)", " ", text)  # strip @-mentions / reply-forward usernames
    text = re.sub(r"\[\S+\]", "", text)  # strip emoticon tags like [doge]
    text = re.sub(r"#\S+#", "", text)  # strip hashtag spans (#topic# removed entirely)
    URL_REGEX = re.compile(
        r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
        re.IGNORECASE)
    text = re.sub(URL_REGEX, "", text)  # strip URLs
    text = text.replace("转发微博", "")  # drop the boilerplate retweet marker
    text = re.sub(r"\s+", " ", text)  # merge any remaining whitespace

    # Drop a leading '，' introduced by the substitutions above.
    if text.startswith('，'):
        text = text[1:]

    return text.strip()


# MongoDB connection settings.
# NOTE(review): credentials and host are hard-coded in source — move them to
# environment variables or a secrets store before sharing/deploying this script.
user = 'mongouser'
pwd = 'tuanjie123'
host = '118.25.106.14'
port = 27017
# quote_plus escapes special characters in user/password, as PyMongo requires
# for values embedded in a connection URI.
url = 'mongodb://%s:%s@%s' % (quote_plus(user), quote_plus(pwd), host)

client = MongoClient(url, port=port)
# task_test holds the annotation/check records; content holds the raw articles.
collection_task_test = client['tagging_test']['task_test']
collection_content = client['tagging_test']['content']

# Filter: finished ArticleClassify annotations checked before the cutoff.
query = {
    'taskKey': 'ArticleClassify',
    'canOutput': 1,
    # previously used window: 'checkDate': {'$gte': 1619798400000}
    'checkDate': {'$lt': 1666335840000},  # epoch milliseconds cutoff
}
# Project only the fields the export needs.
projection = {
    'taskId': 1,
    'newsId': 1,
    'checkData': 1,
    'checkDate': 1,
    '_id': 0,
}

# no_cursor_timeout keeps the server-side cursor alive for this long-running export.
lines = collection_task_test.find(query, projection, no_cursor_timeout=True)

# Cursor.count() was deprecated in PyMongo 3.7 and removed in 4.0;
# count_documents() on the collection (same filter) is the supported replacement.
total = collection_task_test.count_documents(query)
print(total)

cut_num = 10000  # flush one JSON file per 10,000 cursor rows
file_index = 0   # suffix of the next output file

save_list = []       # rows accumulated since the last flush
success_count = 0
error_count = 0


def _flush_chunk(records, index):
    """Write one chunk of annotated rows to ./zhihu_json_file/json_<index>.json."""
    with codecs.open('./zhihu_json_file/json_' + str(index) + '.json', 'w', encoding='utf-8') as f:
        json.dump(records, f, ensure_ascii=False, indent=4)


for idx, line in tqdm(enumerate(lines), total=total):  # lines streams from Mongo
    try:
        cur_news_id = line['newsId']
        # Look up the raw article body for this task's newsId.
        article = list(collection_content.find({'newsId': cur_news_id}))[0]['content']
        title = article['title']
        content = article['content']
        line['article'] = {'title': title, 'content': clean(content)}
        cur_cate = line['checkData']['cate']
        if cur_cate in category:
            cate_dict[cur_cate] += 1
        else:
            other_count += 1
    except Exception:
        # Missing article / missing keys: count the row as failed but keep going.
        # (Narrowed from a bare `except:` so Ctrl-C still stops the script.)
        error_count += 1
        ok = False
    else:
        ok = True

    # Flush on every cut_num-th cursor row — success or failure — before
    # appending the current row, matching the original chunking; files may
    # therefore hold fewer than cut_num records when some rows failed.
    if (idx + 1) % cut_num == 0:
        _flush_chunk(save_list, file_index)
        save_list = []
        file_index += 1

    if ok:
        save_list.append(line)
        success_count += 1

# 判断是否有多余数据
# Flush any trailing rows that did not fill a complete chunk.
if save_list:
    tail_path = './zhihu_json_file/json_' + str(file_index) + '.json'
    with codecs.open(tail_path, 'w', encoding='utf-8') as f:
        json.dump(save_list, f, ensure_ascii=False, indent=4)

# Summary: exported rows, failed rows, per-category tallies, uncategorised rows.
for summary in (success_count, error_count, cate_dict):
    print(summary)
print('其他: {}'.format(str(other_count)))
