#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author    : Xiangwei Wang
# email     : wangxw-cn@qq.com
# datetime  : 2021/4/29 22:49

"""
"""

import torch
# NOTE: leftover comment — torchvision (not imported here) provides common
# datasets whose outputs are PILImage images with values in [0, 1].

import os
import json
import re
import time
import datetime
import chinese_calendar
import jieba

from MLP import MLPtrain

# Directory holding the trained checkpoints, plus the input/output data paths.
model_dir = 'saved_model'
predict_data_file = "../../Data/weibo_predict_data.txt"
data_file = "../../Data/feature_sum.json"

# Output file: one "uid\tmid\tforward,comment,like" line per input post.
predict_results_file = "../../Data/predict_results.txt"

# One regression network per prediction target (repost, comment, like counts).
forward_net = MLPtrain.WeiboNet()
comment_net = MLPtrain.WeiboNet()
like_net = MLPtrain.WeiboNet()

# Restore the trained weights for each network from its checkpoint.
forward_net.load_state_dict(torch.load(os.path.join(model_dir, "ForwardNet.pth")))
comment_net.load_state_dict(torch.load(os.path.join(model_dir, "CommentNet.pth")))
like_net.load_state_dict(torch.load(os.path.join(model_dir, "LikeNet.pth")))


def get_keywords(keywords_file="../../Data/content_feature_3.txt", limit=30):
    """Load the frequency-filtered keywords used as content features.

    Each line of the file is expected to carry a keyword as its first
    tab-separated field; at most ``limit`` lines are read from the top.

    Args:
        keywords_file: Path to the keyword list (first TSV column is the word).
            Defaults to the project's frequency-filtered keyword file.
        limit: Maximum number of lines to read from the top of the file.

    Returns:
        list[str]: The keywords, in file order (possibly fewer than ``limit``).
    """
    keywords = []
    with open(keywords_file, "r", encoding='utf-8') as f3:
        for line in f3.readlines()[:limit]:
            fields = line.strip().split('\t')
            # str.split always returns at least one element, so checking the
            # list length alone let blank lines through as "" keywords; check
            # the word itself instead.
            if fields[0]:
                keywords.append(fields[0])
    print(f"获取 {len(keywords)} 个关键词：{keywords}")
    return keywords

test_samples = []

# Keywords and pre-compiled regexes used to derive post-content features.
keywords = get_keywords()
pattern_theme = re.compile(r"\#.+\#")
pattern_at = re.compile(r"\@")
pattern_forward = re.compile(r"【转发】")

users = {}
print("正在读取原始数据")

# Global per-user averages serve as fallback statistics for users that
# do not appear in the training data.
global_ave_for = 0
global_ave_like = 0
global_ave_com = 0
global_user_cnt = 0

with open(data_file, "r", encoding='utf-8') as f:
    users = json.load(f)
    global_user_cnt = len(users)
    # Averages over all users; iteration order of the dict is the same as
    # the original accumulation loop, so the float results are identical.
    global_ave_for = sum(u['ave_for'] for u in users.values()) / float(global_user_cnt)
    global_ave_like = sum(u['ave_like'] for u in users.values()) / float(global_user_cnt)
    global_ave_com = sum(u['ave_com'] for u in users.values()) / float(global_user_cnt)
print("读取原始数据完成")

with torch.no_grad():  # inference only — no gradients needed
    with open(predict_results_file, "w", encoding='utf-8') as result_file:
        with open(predict_data_file, 'rb') as f:
            line_cnt = 0
            lines = f.readlines()
            for l in lines:
                line_cnt += 1
                if line_cnt % 10000 == 0:
                    print(f"已经处理 {line_cnt} 行数据")
                line = l.decode('utf-8')
                # maxsplit=3 -> exactly 4 fields (uid, mid, time, content);
                # the limit keeps tabs inside the post body from splitting it.
                fields = line.split("\t", 3)
                if len(fields) != 4:
                    print(f"line {line_cnt} has error: {line}   {fields}")
                else:
                    uid = fields[0]
                    mid = fields[1]

                    # ================ time features ===============
                    thisTime = time.strptime(fields[2], '%Y-%m-%d %H:%M:%S')
                    thisDate = datetime.date(thisTime.tm_year, thisTime.tm_mon, thisTime.tm_mday)
                    weekday = thisTime.tm_wday + 1  # day of week, 1 (Mon) - 7 (Sun)
                    yearday = thisTime.tm_yday  # day of year, 1-366
                    # Chinese statutory calendar: workday / holiday lookups.
                    is_workday = chinese_calendar.is_workday(thisDate)
                    is_holiday, holiday_name = chinese_calendar.get_holiday_detail(thisDate)
                    is_weekend = False
                    if weekday > 5:
                        is_weekend = True
                    hour = thisTime.tm_hour

                    timestamp = time.mktime(thisTime)  # struct_time -> POSIX timestamp (float, local time)

                    sample = {
                        "timestamp": timestamp,
                        "raw_time": fields[2],
                        "time_weekday": weekday,
                        "time_yearday": yearday,
                        "time_hour": hour,
                        "time_is_workday": is_workday,
                        "time_is_holiday": is_holiday,
                        "time_is_weekend": is_weekend,
                        "time_holiday_name": holiday_name,
                    }

                    # ============= post-content features =================
                    content = fields[3].strip()
                    words = jieba.lcut(content)
                    sample['raw'] = content
                    sample['seg'] = words
                    # Occurrence count of each tracked keyword in the post.
                    # NOTE(review): re.findall treats the keyword as a regex,
                    # so keywords containing metacharacters would misbehave;
                    # str.count would be safer. Also, if `keywords` is empty,
                    # `raw` below is never bound and raises NameError.
                    for k in keywords:
                        raw = content
                        sample[f"key_{k}"] = len(re.findall(k, raw))
                    # Heuristic: post carries a #topic# marker.
                    sample["has_theme"] = (pattern_theme.search(raw) != None)
                    # Post mentions another user (@).
                    sample["has_at"] = (pattern_at.search(raw) != None)
                    # Post is a repost (contains the 【转发】 marker).
                    sample["has_forward"] = (pattern_forward.search(raw) != None)

                    # =================== user features ================
                    if uid in users:
                        # NOTE(review): these camelCase keys ("aveCom"...) differ
                        # from the snake_case keys ("ave_com"...) read when the
                        # global averages were computed from the same JSON —
                        # confirm the file really contains both spellings,
                        # otherwise this raises KeyError.
                        sample["aveCom"] = users[uid]["aveCom"]
                        sample["aveLike"] = users[uid]["aveLike"]
                        sample["aveFor"] = users[uid]["aveFor"]
                    else:
                        # Unknown user: fall back to the global averages.
                        print(f"line {line_cnt} has warning: 用户 {uid} 未在训练数据中找到")
                        sample["aveCom"] = global_ave_com
                        sample["aveLike"] = global_ave_like
                        sample["aveFor"] = global_ave_for

                    # NOTE(review): sample["forPro"]/["comPro"]/["likePro"] are
                    # never assigned anywhere in this script, so this lookup
                    # raises KeyError; they presumably belong to the per-user
                    # stats loaded above — verify against the training code.
                    feature = [
                        sample["aveFor"], sample["aveCom"], sample["aveLike"],
                        sample["forPro"], sample["comPro"], sample["likePro"],
                    ]
                    feature.append(1 if sample['time_is_workday'] else 0)
                    feature.append(1 if sample['time_is_holiday'] else 0)
                    feature.append(1 if sample['time_is_weekend'] else 0)
                    feature.append(sample['time_weekday'] / 7.0)
                    feature.append(sample['time_hour'] / 24.0)

                    feature.append(1 if sample['has_theme'] else 0)
                    feature.append(1 if sample['has_at'] else 0)
                    feature.append(1 if sample['has_forward'] else 0)

                    # Fraction of tracked keywords that occur at least once.
                    kw_cnt = 0
                    kw_sum = 0
                    for key in sample:
                        # NOTE(review): matches "key_" anywhere in the key name,
                        # not only as a prefix; startswith would be stricter.
                        if re.search("key_", key):
                            kw_sum += 1
                            if sample[key] > 0:
                                kw_cnt += 1
                    # NOTE(review): ZeroDivisionError if no keywords were loaded.
                    feature.append(float(kw_cnt) / float(kw_sum))

                    # Run all three regressors on the same feature vector and
                    # clamp each rounded prediction to a non-negative count.
                    input = torch.tensor(feature).float()
                    forward_num = max(0, round(forward_net(input).item()))
                    comment_num = max(0, round(comment_net(input).item()))
                    like_num = max(0, round(like_net(input).item()))
                    result = f"{uid}\t{mid}\t{forward_num},{comment_num},{like_num}\n"
                    result_file.write(result)

            print(f"读取完成，共计 {line_cnt} 行")
