#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author    : Xiangwei Wang
# email     : wangxw-cn@qq.com
# datetime  : 2021/4/28 14:22

"""
"""

import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# torchvision 包含了一些常用数据集，其输出是 PILImage 图像，值为 [0,1]
import logging
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

import torch.optim as optim
import os
from torch.utils.data import Dataset
import argparse

# ============= logging setup ===================
logger = logging.getLogger()  # no name -> configure the root logger
logger.setLevel(logging.INFO)
# Derive the log file name from the running script's basename without its
# extension.  BUG FIX: the original used str.replace(ext, ""), which strips
# EVERY occurrence of the extension substring (e.g. "train.py.py" -> "train");
# os.path.splitext removes only the final extension.
log_file = os.path.splitext(os.path.basename(sys.argv[0]))[0]
log_dir = "../log"
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
logging_formatter = logging.Formatter(
    '%(asctime)s %(levelname)s (%(filename)s:%(lineno)d) %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
# FileHandler: persist records to a per-script log file (truncated each run)
fh = logging.FileHandler(f'{log_dir}/{log_file}.log', mode='w')
fh.setLevel(logging.INFO)
fh.setFormatter(logging_formatter)
# StreamHandler: echo the same records to the console
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging_formatter)
# attach both handlers to the root logger
logger.addHandler(ch)
logger.addHandler(fh)
# ============= logging setup ===================


class WeiboNet(nn.Module):
    """3-layer MLP regressor: 15 input features -> 1 scalar prediction.

    The 15 inputs are the per-blog feature vector built in read_data();
    the single output is the predicted interaction count (forward /
    comment / like, depending on which target the dataset serves).
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: the original assigned self.fc1 twice — the 15->512
        # input layer was immediately overwritten by a 512->512 layer,
        # so forward() crashed on the actual 15-dimensional input.
        self.fc1 = nn.Linear(15, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, x):
        """Map a (batch, 15) float tensor to a (batch, 1) prediction."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # no final activation: unbounded regression output
        x = self.fc3(x)
        return x

def train(epoch):
    """Run one training epoch over the global `trainloader`.

    Relies on module-level globals created in __main__: trainloader,
    device, lenet, optimizer, criterion.  Logs the mean per-iteration
    loss every 100 iterations.
    """
    running_loss = 0.0
    for i, data in enumerate(trainloader):
        # each batch is the dict produced by WeiboDataset.__getitem__
        inputs, labels = data['image'].to(device), data['label'].to(device)

        optimizer.zero_grad()
        # BUG FIX: the model emits (batch, 1) while labels are (batch,).
        # nn.MSELoss would broadcast them to (batch, batch) and compute a
        # wrong loss; squeeze the trailing dim so the shapes match.
        outputs = lenet(inputs).squeeze(1)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 100 == 99:
            # BUG FIX: the original divided by 100 AND by a never-reset
            # cumulative sample count; MSELoss already averages over the
            # batch, so the mean over the last 100 iterations is loss/100.
            logging.info(f"epoch:{epoch}, iteration:{i+1}/{len(trainloader)}, loss:{running_loss/100.0}")
            running_loss = 0.0

def test(e, epoch):
    """Log the mean absolute error of `lenet` over the data loader.

    Relies on module-level globals: trainloader, device, lenet.
    NOTE(review): this iterates `trainloader`, i.e. it measures error on
    the training data itself — there is no held-out test split in this
    script; confirm that is intended.
    """
    total = 0
    diff = 0
    with torch.no_grad():  # inference only — no gradients needed
        for data in trainloader:
            inputs, labels = data['image'].to(device), data['label'].to(device)
            outputs = lenet(inputs)
            # vectorized sum of |label - prediction| (the original
            # accumulated element-by-element in a Python loop, which is
            # needlessly slow; the value is identical)
            diff += torch.abs(labels - outputs.squeeze(1)).sum()
            total += labels.size(0)  # samples seen so far
    logging.info(f"epoch {e+1}/{epoch} diff: {(diff/total).item()}")

import json
import re

def read_data(data_file):
    """Load the user/blog feature JSON and flatten it into samples.

    Parameters
    ----------
    data_file : str
        Path to a JSON file mapping uid -> user record.  Each user record
        carries aggregate stats (aveFor/aveCom/aveLike, forPro/comPro/
        likePro) and a "blogs" dict of mid -> blog record.

    Returns
    -------
    list of (feature_tuple, (forward_num, comment_num, like_num))
        feature_tuple holds 15 numbers: 6 user aggregates, 5 posting-time
        features, 3 content flags, and the fraction of keyword fields
        with a positive count.
    """
    with open(data_file, "r", encoding='utf-8') as f:
        users = json.load(f)

    weibo_data = []
    for uid, this_user in users.items():
        for mid, this_blog in this_user["blogs"].items():
            # per-user aggregate statistics
            feature = [
                this_user["aveFor"], this_user["aveCom"], this_user["aveLike"],
                this_user["forPro"], this_user["comPro"], this_user["likePro"],
            ]

            # posting-time features (continuous ones normalized to [0, 1))
            feature.append(1 if this_blog['time_is_workday'] else 0)
            feature.append(1 if this_blog['time_is_holiday'] else 0)
            feature.append(1 if this_blog['time_is_weekend'] else 0)
            feature.append(this_blog['time_weekday'] / 7.0)
            feature.append(this_blog['time_hour'] / 24.0)

            # content flags
            feature.append(1 if this_blog['has_theme'] else 0)
            feature.append(1 if this_blog['has_at'] else 0)
            feature.append(1 if this_blog['has_forward'] else 0)

            # Fraction of keyword fields with a positive count.  A plain
            # substring test replaces re.search("key_", k) — both match
            # "key_" anywhere in the field name, so behavior is unchanged.
            kw_keys = [k for k in this_blog if "key_" in k]
            kw_hit = sum(1 for k in kw_keys if this_blog[k] > 0)
            # BUG FIX: guard against ZeroDivisionError when a blog record
            # carries no keyword fields at all.
            feature.append(float(kw_hit) / float(len(kw_keys)) if kw_keys else 0.0)

            weibo_data.append(
                (tuple(feature),
                 (this_blog['forward_num'], this_blog['comment_num'], this_blog['like_num']))
            )
    logging.info(f"加载完成数据，共 {len(users)} 用户")
    return weibo_data

class WeiboDataset(Dataset):
    """Dataset over flattened weibo samples loaded from a JSON file.

    `type` selects which interaction count is served as the label:
    0 = forward count, 1 = comment count, 2 = like count.
    """

    def __init__(self, data_file, type = 0):
        self.data = read_data(data_file)
        self.type = type
        # placeholders for optional feature / label transforms
        self.transform = None
        self.target_transform = None

    def __len__(self):
        """Number of flattened (feature, counts) samples."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return {'image': 15-dim float tensor, 'label': scalar float tensor}."""
        features, counts = self.data[idx]
        return {
            "image": torch.tensor(features).float(),
            "label": torch.tensor(counts[self.type]).float(),
        }

def weight_init(m):
    """Per-module initializer, intended for `model.apply(weight_init)`.

    BUG FIX: the original set Linear *weights* to constant 0, which makes
    every hidden unit compute the same output and receive the same
    gradient — the network cannot break symmetry and never learns.  The
    (previously commented-out) Xavier init is restored for weights; the
    zero bias init is kept.
    """
    if isinstance(m, nn.Linear):
        print("Linear")
        nn.init.xavier_normal_(m.weight)
        nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Conv2d):
        print("Conv2d")
        # He init suits ReLU-activated conv layers
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
    elif isinstance(m, nn.BatchNorm2d):
        print("BatchNorm2d")
        # identity scale, zero shift
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
    else:
        # container modules (e.g. the network itself) fall through here
        print("未检测到模块类型")

if __name__ == "__main__":
    # logging.info(f"os: {os.uname()}")
    logging.info("start")

    # ---- CLI: choose which interaction count the model predicts ----
    parser = argparse.ArgumentParser(
        description="Weibo MLP"
    )
    parser.add_argument(
        "--type",
        type=int,
        default=0,
        help="""0:forward, 1:comment, 2:like"""
    )
    args = parser.parse_args()
    # NOTE(review): with default=0 this assert can never fire; it only
    # documents the intent that --type be provided.
    assert args.type is not None, "must provide --type argument."
    net_type = args.type

    # dataset path is relative to the script's working directory
    trainset = WeiboDataset("../../Data/feature_sum.json", net_type)
    logging.info(f"样本数量：{len(trainset)}")

    batch_size = 100
    num_workers = 0
    trainloader = DataLoader(
        trainset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers
    )  # num_workers > 0 can misbehave on Windows, so keep it at 0

    criterion = nn.MSELoss()

    lenet = WeiboNet()
    model_dir = 'saved_model'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # checkpoint file name follows the selected prediction target
    model_filename = 'ForwardNet.pth'
    if net_type == 1:
        model_filename = "CommentNet.pth"
    elif net_type == 2:
        model_filename = "LikeNet.pth"

    model_path = os.path.join(model_dir, model_filename)
    # if os.path.exists(model_path):
    #     lenet.load_state_dict(torch.load(model_path))
    #     logging.info("loaded pretrained model")
    # elif not os.path.exists(model_dir) :
    #     lenet.apply(weight_init)
    #     os.makedirs(model_dir)

    optimizer = optim.SGD(lenet.parameters(), lr=.001)  # register model params with the optimizer

    # `device` defined here is read as a module-level global by train()/test()
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    # device = 'cpu'

    if device == 'cuda:0':
        lenet.to(device)

    epoch = 10000
    logging.info(f"batch_size:{batch_size} num_workers:{num_workers} device:{device}")
    for e in range(epoch):
        train(e)
        # NOTE(review): test() iterates the *training* loader — there is
        # no held-out evaluation split in this script.
        test(e, epoch)
        if e % 100 == 99:
            # checkpoint every 100 epochs
            logging.info(f"当前epoch：{e}，已保存模型")
            torch.save(lenet.state_dict(), model_path)

    torch.save(lenet.state_dict(), model_path)
    logging.info("Finish Training")

