#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author    : Xiangwei Wang
# email     : wangxw-cn@qq.com
# datetime  : 2021/4/28 14:22

"""
处理数据，提取预测数据中的博文特征
"""

'''
最终格式（JSON）：
{
    00001(uid): {
        ...
        blogs: {
            11111: {
                ...
            },
            22222: {
                ...
            },
        },
    },
}
'''

import json
import time
from collections import Counter

import jieba
import pandas as pd

jieba.initialize()  # Manual init (optional): pay the dictionary-load cost up front instead of on the first lcut()

# Input: raw prediction data, one blog per line: uid \t mid \t timestamp \t content
train_data_file = "../../Data/weibo_predict_data.txt"
# train_slim_data_file = "../Data/train_slim_data.txt"  # small sample for testing the program

content_feature_1_file = "../../Data/predict/content_feature_1.json"  # raw data reorganised as JSON (write currently commented out in get_json)
content_feature_2_file = "../../Data/predict/content_feature_2.txt"  # most frequent words (code writes top 10000, original note said 1000 — TODO confirm)
content_feature_3_file = "../../Data/content_feature_3.txt"  # manually curated keywords picked from the frequency list; NOTE(review): unlike the other paths this one lacks the predict/ subdir — confirm intentional
content_feature_4_file = "../../Data/predict/content_feature_4.json"  # final result

# uid -> {"blogs": {mid: {"raw": ..., "seg": ...}}}; filled by get_json(), consumed by process_data()
users = {}

def get_json():
    all_words = []
    with open(train_data_file, 'rb') as f:
        line_cnt = 0
        lines = f.readlines()
        for l in lines:
            line_cnt += 1
            if line_cnt % 10000 == 0:
                print(f"已经处理 {line_cnt} 行数据")
            line = l.decode('utf-8')
            # line = line.strip()
            fields = line.split("\t", 3)  # 分解为6个字段，限制为6是防止博文内容含有\t
            if len(fields) != 4:
                print(f"line {line_cnt} has error: {line}   {fields}")
            else:
                uid = fields[0]
                mid = fields[1]

                # timestamp = time.mktime(time.strptime(fields[2], '%Y-%m-%d %H:%M:%S'))  # 字符串->时间元组->时间戳（10位浮点）
                content = fields[3].strip()

                words = jieba.lcut(content)
                all_words.extend(words)

                if uid in users:
                    users[uid]["blogs"][mid] = {
                        "raw": content,
                        "seg": words,
                    }
                else:
                    users[uid] = {
                        "blogs": {
                            mid: {
                                "raw": content,
                                "seg": words,
                            }
                        }
                    }
                # print(users[uid])
        print(f"读取完成，共计 {line_cnt} 行")

        # ======= 写入json ===========：
        # jsonStr = json.dumps(users, ensure_ascii=False)
        # with open(content_feature_1_file, "w", encoding='utf-8') as f1:
        #     f1.write(jsonStr)
    return all_words

def get_word_frequency():
    # ========= 统计词频 ============：
    all_words = get_json()
    print('正在统计词频')
    corpus = pd.DataFrame(all_words, columns=['word'])
    corpus['cnt'] = 1

    g = corpus.groupby(['word']).agg({'cnt': 'count'}).sort_values('cnt', ascending=False)
    heads = g.head(10000)

    with open(content_feature_2_file, "w", encoding='utf-8') as f2:
        for index, row in heads.iterrows():
            # print(f"{index}\t{row['cnt']}\n")
            f2.write(f"{index}\t{row['cnt']}\n")
    print('完成统计词频')

def get_keywords():
    # =========读取筛选过的关键词=======
    keywords = []
    with open(content_feature_3_file, "r", encoding='utf-8') as f3:
        lines = f3.readlines()[:30]
        for line in lines:
            word = line.strip().split('\t')
            if len(word)>0:
                keywords.append(word[0])
        # print(keywords)
    print(f"获取 {len(keywords)} 个关键词：{keywords}")
    return keywords

import re
def process_data():
    keywords = get_keywords()
    pattern_theme = re.compile(r"\#.+\#")
    pattern_at = re.compile(r"\@")
    pattern_forward = re.compile(r"【转发】")

    all_users = users
    # with open(content_feature_1_file, "r", encoding='utf-8') as f1:
    #     all_users = json.load(f1)
    # print(all_users)

    user_num = len(all_users.items())
    print(f'共 {user_num} 个用户')

    user_cnt = 0
    for user in all_users:
        user_cnt += 1
        if user_cnt % 100 == 0:
            print(f"正在处理第 {user_cnt}/{user_num} 个用户：{user_cnt/user_num*100}%")
        for mid in all_users[user]["blogs"]:
            # 关键词：
            for k in keywords:
                raw = all_users[user]["blogs"][mid]['raw']
                all_users[user]["blogs"][mid][f"key_{k}"] = len(re.findall(k, raw))
            # 是否有主题（但不一定准确吧）
            all_users[user]["blogs"][mid]["has_theme"] = (pattern_theme.search(raw) != None)
            # 是否at（@）
            all_users[user]["blogs"][mid]["has_at"] = (pattern_at.search(raw) != None)
            # 是否转发（【转发】）
            all_users[user]["blogs"][mid]["has_forward"] = (pattern_forward.search(raw) != None)

    # print(all_users)

    # ======= 写入json ===========：
    print(f"已处理完 {user_cnt} 个用户，正在写入 json，这一过程可能持续一两分钟")
    jsonStr = json.dumps(all_users, ensure_ascii=False)
    with open(content_feature_4_file, "w", encoding='utf-8') as f4:
        f4.write(jsonStr)
    print("写入完成")


'''
记得设置本程序最开头的文件路径：
train_data_file：原始数据文件路径
content_feature_3_file：关键词文件路径
content_feature_4_file：最终结果（json）输出路径
'''
if __name__ == "__main__":
    # Step 1: build the JSON structure and dump the word frequencies:
    # get_word_frequency()
    # Step 2: after manually curating the keyword file, extract the features:
    # process_data()

    # === Keywords already curated (file 3 exists), so only these steps are needed: ===
    get_json()
    process_data()

    exit(0)



