# -*-coding:utf-8-*-
import math
import numpy as np
import utils.common as common
from utils.feature import extract_delta
from utils.vad2 import vad_core
from utils.common import get_wav_len, my_argmax


def get_test_acc(result_file):
    """Compute top-1 accuracy from a prediction result file.

    Each valid line has the form ``<wav_path> <pred_label>``; the ground-truth
    label is taken to be the immediate parent directory name of ``wav_path``.
    Every line (including malformed ones) counts toward the denominator, but
    only well-formed lines can count as correct — this matches the original
    strict behavior.

    Args:
        result_file: path to a whitespace-separated result file.

    Returns:
        float accuracy in [0, 1]; ~0.0 for an empty file (epsilon guard
        avoids division by zero).
    """
    count = 0
    true_count = 0
    with open(result_file, 'r') as f:
        # idiomatic line iteration instead of a manual readline() loop
        for line in f:
            count += 1
            tmp = line.strip().split()
            if len(tmp) == 2:
                wav_path, pred = tmp
                # ground truth = parent folder name of the wav file
                label = wav_path.split('/')[-2]
                if label == pred:
                    true_count += 1
    return true_count / (count + 1e-8)


def postprocess(wav_path, X, Y, model, posterior_thresh, utt_percent_thresh, no_activity_label, label_level='frame', is_delta=True, is_vad=False):
    '''Decode one utterance: run the model on features X and reduce the
    frame-level posteriors to a single utterance-level label.

    Args:
        wav_path: path to the wav file (used for VAD and logging).
        X: feature matrix for one utterance (frames x feature-dim).
        Y: one-hot frame-level ground-truth label matrix.
        model: model exposing a ``predict`` method over a 4-D batch.
        posterior_thresh: threshold that binarizes posteriors to 0/1.
        utt_percent_thresh: a class is emitted only when its active-frame
            ratio over the utterance exceeds this value.
        no_activity_label: label returned by ``my_argmax`` when no class fires.
        label_level: ``'frame'`` enables frame->utterance aggregation
            (and optional VAD masking).
        is_delta: append delta features before prediction.
        is_vad: apply VAD masking of silence frames. Was a hard-coded
            local ``is_vad = False`` that made the VAD branch dead code;
            now a parameter whose default preserves the old behavior.

    Returns:
        (true_label, pred_label, pred) — integer true/predicted labels and
        the 0/1 per-class decision vector.
    '''
    Y = np.squeeze(Y)
    # add the batch and channel dims the model expects
    X = np.expand_dims(X, axis=0)
    X = np.expand_dims(X, axis=1)
    if is_delta:
        X = extract_delta(X)

    pred = model.predict(X)
    pred = pred > posterior_thresh
    pred = np.squeeze(pred, axis=0)                 # drop the batch dim
    pred = pred.astype(int)                         # binarize to a 0/1 matrix

    # For frame-level annotation: optionally mask silence with VAD, then
    # map frame-level activations to utterance-level per-class ratios.
    if label_level == 'frame':
        if is_vad:
            duration = get_wav_len(wav_path)
            vad_time_infos = vad_core(wav_path)
            # -1 marks silence; frames inside VAD segments are flipped to +1
            vad_mask = np.ones(pred.shape[0]) * -1
            for info in vad_time_infos:
                # info[1]/info[2] presumably segment start/end in seconds -- TODO confirm against vad_core
                start_frame = math.ceil(pred.shape[0] * (info[1] / duration))
                end_frame = math.floor(pred.shape[0] * (info[2] / duration))
                vad_mask[start_frame:end_frame] = 1
            pred = pred * np.expand_dims(vad_mask, -1)      # silence frames go negative
        pred = pred[(pred >= 0).all(axis=1)]            # keep only non-negative (non-silence) frames
        # per-class active ratio over the kept frames; compared to utt_percent_thresh below
        pred = np.sum(pred, axis=0)/(pred.shape[0] + 1e-8)

    print(wav_path, pred)
    # keep only the top-scoring class: scoring assumes single-label output,
    # though the raw ratios could support multi-label annotation
    pred = (pred.max() == pred) * pred
    Y = np.sum(Y, axis=0)/(Y.shape[0] + 1e-8)           # frame-level truth -> utterance level
    pred = np.int64(pred > utt_percent_thresh)
    pred_label = my_argmax(pred, no_activity_label)
    true_label = np.argmax(Y)

    return true_label, pred_label, pred


#####################
# Scoring functions
#
# Code blocks taken from Toni Heittola's repository: http://tut-arg.github.io/sed_eval/
#
# Implementation of the Metrics in the following paper:
# Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen, 'Metrics for polyphonic sound event detection',
# Applied Sciences, 6(6):162, 2016
#####################


def f1_overall_framewise(O, T):
    """Micro-averaged frame-level F1 between binary outputs O and targets T.

    3-D inputs (batch, frames, classes) are flattened to 2-D first.
    """
    if len(O.shape) == 3:
        O = common.reshape_3Dto2D(O)
        T = common.reshape_3Dto2D(T)

    # 2*T - O equals 1 only where T == 1 and O == 1, i.e. a true positive.
    TP = ((2 * T - O) == 1).sum()
    Nref = T.sum()
    Nsys = O.sum()

    precision = float(TP) / float(Nsys + common.eps)
    recall = float(TP) / float(Nref + common.eps)
    return 2 * precision * recall / (precision + recall + common.eps)


def er_overall_framewise(O, T):
    """Overall frame-level error rate ER = (S + D + I) / Nref.

    Substitutions, deletions and insertions follow Mesaros et al. (2016);
    3-D inputs (batch, frames, classes) are flattened to 2-D first.
    """
    if len(O.shape) == 3:
        O = common.reshape_3Dto2D(O)
        T = common.reshape_3Dto2D(T)

    # per-frame false positive / false negative counts across classes
    false_pos = np.logical_and(T == 0, O == 1).sum(1)
    false_neg = np.logical_and(T == 1, O == 0).sum(1)

    substitutions = np.minimum(false_pos, false_neg).sum()
    deletions = np.maximum(0, false_neg - false_pos).sum()
    insertions = np.maximum(0, false_pos - false_neg).sum()

    return (substitutions + deletions + insertions) / (T.sum() + 0.0)


def f1_overall_1sec(O, T, block_size):
    """Block-level F1: max-pool every ``block_size`` frames into one block,
    then score with f1_overall_framewise.

    Bug fix: the block slice previously ended at
    ``i*block_size + block_size - 1``. Python slice ends are exclusive, so
    this dropped the last frame of every block, and with ``block_size == 1``
    produced an empty slice that makes ``np.max`` raise. The correct
    exclusive end is ``(i + 1) * block_size``.
    """
    if len(O.shape) == 3:
        O, T = common.reshape_3Dto2D(O), common.reshape_3Dto2D(T)
    # ceil: a trailing partial block still forms a (shorter) block
    new_size = int(np.ceil(O.shape[0] / block_size))
    O_block = np.zeros((new_size, O.shape[1]))
    T_block = np.zeros((new_size, O.shape[1]))
    for i in range(new_size):
        lo = int(i * block_size)
        hi = int((i + 1) * block_size)   # exclusive end (was block_size - 1: off-by-one)
        O_block[i, :] = np.max(O[lo:hi, :], axis=0)
        T_block[i, :] = np.max(T[lo:hi, :], axis=0)
    return f1_overall_framewise(O_block, T_block)


def er_overall_1sec(O, T, block_size):
    """Block-level error rate: max-pool every ``block_size`` frames into one
    block, then score with er_overall_framewise.

    Bug fixes:
    - The block slice previously ended at ``i*block_size + block_size - 1``;
      slice ends are exclusive, so the last frame of every block was dropped
      (and ``block_size == 1`` crashed on an empty-slice ``np.max``). The
      correct exclusive end is ``(i + 1) * block_size``.
    - ``new_size`` used floor division while f1_overall_1sec uses ceil, so
      the two metrics silently disagreed on a trailing partial block; now
      both use ceil.
    """
    if len(O.shape) == 3:
        O, T = common.reshape_3Dto2D(O), common.reshape_3Dto2D(T)
    # ceil for consistency with f1_overall_1sec (was floor, dropping the tail)
    new_size = int(np.ceil(O.shape[0] / block_size))
    O_block = np.zeros((new_size, O.shape[1]))
    T_block = np.zeros((new_size, O.shape[1]))
    for i in range(new_size):
        lo = int(i * block_size)
        hi = int((i + 1) * block_size)   # exclusive end (was block_size - 1: off-by-one)
        O_block[i, :] = np.max(O[lo:hi, :], axis=0)
        T_block[i, :] = np.max(T[lo:hi, :], axis=0)
    return er_overall_framewise(O_block, T_block)


def compute_scores(pred, y, frames_in_1_sec=50):
    """Bundle the block-level F1 and error-rate metrics into one dict.

    Args:
        pred: binary prediction matrix (frames x classes, or batched 3-D).
        y: binary ground-truth matrix with the same shape as pred.
        frames_in_1_sec: number of frames pooled into one scoring block.

    Returns:
        dict with keys 'f1_overall_1sec' and 'er_overall_1sec'.
    """
    return {
        'f1_overall_1sec': f1_overall_1sec(pred, y, frames_in_1_sec),
        'er_overall_1sec': er_overall_1sec(pred, y, frames_in_1_sec),
    }
