#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 18-10-14
import json
import logging
import os

import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle as sklearn_shuffle
import argparse
import pandas as pd
import csv

# Intent labels for the 31-way SMP2018 classification task.
# Ids are assigned in alphabetical order of the label names.
_LABELS = [
    'app', 'bus', 'calc', 'chat', 'cinemas', 'contacts', 'cookbook',
    'datetime', 'email', 'epg', 'flight', 'health', 'lottery', 'map',
    'match', 'message', 'music', 'news', 'novel', 'poetry', 'radio',
    'riddle', 'schedule', 'stock', 'telephone', 'train', 'translation',
    'tvchannel', 'video', 'weather', 'website',
]

# label name -> integer id
label2id = {name: idx for idx, name in enumerate(_LABELS)}

# integer id -> label name (inverse of label2id)
id2label = dict(enumerate(_LABELS))


def get_single_fold(base_path, fold: int) -> (np.array, np.array, np.array, np.array):
    """
    Load one cross-validation fold from .npy files.

    :param base_path: directory containing the fold's .npy files
    :param fold: fold index (appended to the file-name stems)
    :return: (train_x, train_y, dev_x, dev_y); the label arrays are cast to
             int32 and reshaped to column vectors of shape (n, 1)
    """
    def _load(stem):
        # e.g. stem='train_x', fold=3  ->  <base_path>/train_x3.npy
        return np.load(os.path.join(base_path, f'{stem}{fold}.npy'))

    train_x = _load('train_x')
    dev_x = _load('dev_x')
    train_y = _load('train_y').astype(np.int32)
    train_y = train_y.reshape((len(train_y), 1))
    dev_y = _load('dev_y').astype(np.int32)
    dev_y = dev_y.reshape((len(dev_y), 1))
    return train_x, train_y, dev_x, dev_y


def get_POS_single_fold(base_path, fold: int) -> (np.array, np.array):
    """
    Load the POS-feature .npy files for one cross-validation fold.

    (Fix: the previous annotation claimed four return values; this function
    returns exactly two arrays.)

    :param base_path: directory containing the POS .npy files
    :param fold: fold index (appended to the file-name stems)
    :return: (train_POS, dev_POS)
    """
    train_POS_path = os.path.join(base_path, 'POStrain_x%d.npy' % fold)
    dev_POS_path = os.path.join(base_path, 'POSdev_x%d.npy' % fold)
    train_POS = np.load(train_POS_path)
    dev_POS = np.load(dev_POS_path)
    return train_POS, dev_POS


def undersample(x, y):
    """
    Undersample the majority (negative) class to balance the data.

    Keeps all n positive samples (y == 1), takes the first n negative
    samples (y == 0), concatenates them (negatives first) and returns the
    2n samples in shuffled order.

    :param x: feature array
    :param y: label array (0/1)
    :return: shuffled (x, y) of the balanced subset
    """
    pos_idx = np.where(y == 1)[0]
    # cap the negatives at the number of positives
    neg_idx = np.where(y == 0)[0][:len(pos_idx)]
    balanced_x = np.append(x[neg_idx], x[pos_idx], axis=0)
    balanced_y = np.append(y[neg_idx], y[pos_idx], axis=0)
    return sklearn_shuffle(balanced_x, balanced_y)


def generate_batch(dataX, dataY, batch_size=16, shuffle=False, undersampling=False):
    """
    Batch generator: yields (x_batch, y_batch) slices of the data.

    Fix: the batch loop previously iterated over len(dataX); after
    undersampling the data is shorter than that, so the tail of the loop
    yielded empty batches. The loop now iterates over the length of the
    (possibly resampled) data.

    :param dataX: feature data
    :param dataY: label data
    :param batch_size: number of samples per batch
    :param shuffle: shuffle the data first (ignored when undersampling,
                    since undersample() already shuffles)
    :param undersampling: balance the classes via undersample() first
    :return: generator of (x, y) batches
    """
    if undersampling:
        x, y = undersample(dataX, dataY)
    elif shuffle:
        x, y = sklearn_shuffle(dataX, dataY)
    else:
        x, y = dataX, dataY

    length = len(x)  # length of the data actually being batched
    for start in range(0, length, batch_size):
        end = min(start + batch_size, length)
        yield x[start:end], y[start:end]
    return  # plain return keeps generators compatible with Python 3.7+ (PEP 479)


def is_oov(X_token, _unk_token=1):
    """Return True if the token-id sequence contains the UNK token id."""
    return _unk_token in X_token


def save_hparams(hparams, path):
    """
    Serialize an argparse Namespace to JSON and write it to <path>/hparams.

    Adapted from https://github.com/Kyubyong/transformer/blob/master/utils.py

    :param hparams: argparse.Namespace of hyperparameters
    :param path: output directory (created if missing)
    """
    if not os.path.exists(path):
        os.makedirs(path)
    with open(os.path.join(path, "hparams"), 'w') as fout:
        # vars() turns the Namespace into a plain attribute dict
        fout.write(json.dumps(vars(hparams)))


def load_hparams(parser: argparse.Namespace, path):
    '''
    Load saved hyperparameters and override the attributes of `parser`.

    Adapted from https://github.com/Kyubyong/transformer/blob/master/utils.py
    (Fix: the hparams file is now opened with a context manager so the
    handle is always closed; the original leaked it.)

    parser: argparse Namespace to populate
    path: directory containing the "hparams" file, or a file path inside it
    '''
    if not os.path.isdir(path):
        # a file path was given: fall back to its containing directory
        path = os.path.dirname(path)
    with open(os.path.join(path, "hparams"), 'r') as fin:
        flag2val = json.loads(fin.read())
    for f, v in flag2val.items():
        setattr(parser, f, v)


def get_logger(name, level=logging.INFO, path='log'):
    """
    Build a logger that writes to <path>/train.log and to the console.

    :param name: logger name
    :param level: logging level for the root configuration
    :param path: directory for the log file (must already exist)
    :return: the configured logging.Logger
    """
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # file output ('w' truncates any previous log)
    file_handler = logging.FileHandler(os.path.join(path, 'train.log'), 'w')
    file_handler.setFormatter(fmt)
    # console output
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)

    logging.basicConfig(level=level, handlers=[file_handler, console_handler])
    return logging.getLogger(name)


def load_POS2dict(files: list, save_path: str, BERT_mode=False) -> dict:
    """
    Build a POS-tag -> id dictionary from tag files and save it as JSON.

    Fix: the '_bert' output path was derived via
    save_path.replace('.', '_bert.'), which replaces *every* dot and
    corrupts paths that contain more than one (e.g. './data/POS.json').
    The suffix is now inserted before the extension only.

    :param files: list of file paths to read; each line holds
                  whitespace-separated POS tags
    :param save_path: path of the JSON file to write the mapping to
    :param BERT_mode: when True, additionally build a BIO-style mapping
                      ('B-'/'I-' prefixed tags), save it next to save_path
                      with a '_bert' suffix, and return it instead
    :return: {pos: id} mapping; ids start at 1, '_UNK' is mapped to 0
    """
    collection = set()
    bert_collection = set()
    # Scan every file and collect the distinct POS tags.
    for f in files:
        with open(f, 'r', encoding='utf-8') as fp:
            for line in fp:
                for pos in line.strip().split():
                    collection.add(pos)
                    if BERT_mode:
                        bert_collection.add('B-' + pos)
                        bert_collection.add('I-' + pos)

    def _to_dict(tags):
        # Deterministic ids: sort the tags, number from 1, reserve 0 for '_UNK'.
        d = {x: i for i, x in enumerate(sorted(tags), start=1)}
        d['_UNK'] = 0
        return d

    d = _to_dict(collection)
    with open(save_path, 'w', encoding='utf-8') as fp:
        json.dump(d, fp)
    if BERT_mode:
        d_bert = _to_dict(bert_collection)
        # insert '_bert' before the file extension only
        root, ext = os.path.splitext(save_path)
        with open(root + '_bert' + ext, 'w', encoding='utf-8') as fp:
            json.dump(d_bert, fp)
        return d_bert
    return d


def load_POS_file(path: str) -> dict:
    """
    Load a previously saved POS-to-id mapping from a JSON file.

    :param path: location of the JSON file
    :return: the deserialized {pos: id} dict
    """
    with open(path, 'r', encoding='utf-8') as fp:
        return json.load(fp)


def get_checkpoints(log_dir, nfold=10):
    """
    Collect the latest TensorFlow checkpoint path for each fold.

    :param log_dir: directory containing one numbered subdirectory per fold
    :param nfold: number of folds (subdirectories '0' .. str(nfold - 1))
    :return: list of checkpoint paths (None entries where no checkpoint exists)
    """
    return [tf.train.latest_checkpoint(os.path.join(log_dir, str(i)))
            for i in range(nfold)]


class Calculate:
    """Per-label precision/recall/F1 evaluation over a badcase CSV file."""

    def __init__(self, badcase_file):
        '''
        :param badcase_file: path to the badcase CSV (e.g. ./badcase);
            must contain the columns '正确' (ground truth) and '预测'
            (prediction)
        '''
        self.badcase = pd.read_csv(badcase_file)
        self.Truth = self.badcase['正确']
        self.Predict = self.badcase['预测']

    def Confusion_matrix(self, label):
        '''
        Compute the one-vs-rest confusion matrix for `label` and return
        (p, r, f1), each rounded to 4 decimal places.

        Fix: every division is now guarded, so degenerate cases (no
        predicted positives, no actual positives, or p == r == 0) return
        0.0 instead of raising ZeroDivisionError as the original did
        whenever TP was 0 but FP or FN was not.

        label: the class to evaluate
        '''
        TP = FP = FN = 0
        for truth, predict in zip(self.Truth, self.Predict):
            if truth == label and predict == label:
                TP += 1
            elif predict == label:   # truth != label: false positive
                FP += 1
            elif truth == label:     # predict != label: false negative
                FN += 1
        p = TP / (TP + FP) if TP + FP else 0.0
        r = TP / (TP + FN) if TP + FN else 0.0
        f1 = (2 * p * r) / (p + r) if p + r else 0.0
        print("label: ", label, " done")
        return round(p, 4), round(r, 4), round(f1, 4)

    def Confusion_matrix_31(self):
        '''
        Evaluate all 31 classes; returns the parallel lists
        (labels, P, R, F1).
        '''
        labels, P, R, F1 = [], [], [], []
        for key in label2id:
            p, r, f1 = self.Confusion_matrix(key)
            labels.append(key)
            P.append(p)
            R.append(r)
            F1.append(f1)
        return labels, P, R, F1

    def get_confusion_matrix_31_csv(self, path):
        '''
        Compute the 31-class metrics and save them as CSV to `path`.
        '''
        labels, P, R, F1 = self.Confusion_matrix_31()
        confusion_matrix_dataframe = pd.DataFrame(
            {'label': labels, 'P': P, 'R': R, 'F1': F1})
        confusion_matrix_dataframe.to_csv(path, sep=',')
        print('done')


if __name__ == '__main__':
    # Earlier one-off experiments, kept for reference:
    # load_POS2dict(['data/10fold_tagger/dev_x0.txt', 'data/10fold_tagger/train_x0.txt'],
    #               'data/10fold_tagger/POS.json')
    # print(load_POS_file('data/10fold_tagger/POS.json'))
    # get_single_fold('data/smp2018/10flod_all_3600+_未转阿拉伯', 9)
    # ensemble('log/')

    # Evaluate a stored badcase file and write the per-label metrics next to it.
    calculate = Calculate('log/2019-04-10-00:04/0.78931.csv')
    calculate.get_confusion_matrix_31_csv('log/2019-04-10-00:04/0.78931_matrix.csv')
