#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division

'functions'

__author__ = 'Ma Cong'

import torch
import numpy as np


def accuracy(y_true, y_pred):
    """Binary accuracy of thresholded predictions.

    y_pred is binarized with round_choice (threshold global_mid) before
    comparison. Returns a tuple (accuracy, n_correct).
    """
    # round_choice thresholds IN PLACE via boolean-mask assignment, so round
    # a copy to avoid clobbering the caller's y_pred array.
    y_pred = round_choice(np.copy(y_pred))
    # Mismatch count: term one counts false positives, term two false negatives.
    wrong = np.sum((1 - y_true) * y_pred + y_true * (1 - y_pred))
    # clip(y_true, 1, 1) is an array of ones, so this is the total sample
    # count.  Renamed from `all`, which shadowed the builtin.
    total = np.sum(round_choice(np.clip(y_true, 1, 1)))
    return (total - wrong) / (total + 1e-6), total - wrong

def precision(y_true, y_pred):
    """Precision: fraction of predicted positives that are true positives.

    Both inputs are clipped to [0, 1] and binarized with round_choice
    (threshold global_mid); the epsilon guards against division by zero
    when nothing was predicted positive.
    """
    true_positives = np.sum(round_choice(np.clip(y_true * y_pred, 0, 1)))
    predicted_positives = np.sum(round_choice(np.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + 1e-6)


def recall(y_true, y_pred):
    """Recall: fraction of actual positives that were predicted positive.

    Both inputs are clipped to [0, 1] and binarized with round_choice
    (threshold global_mid); the epsilon avoids division by zero when the
    batch contains no positive labels.
    """
    hits = np.sum(round_choice(np.clip(y_true * y_pred, 0, 1)))
    actual_positives = np.sum(round_choice(np.clip(y_true, 0, 1)))
    return hits / (actual_positives + 1e-6)


def fbeta_score(y_true, y_pred, beta=1):
    """F-beta score: the weighted harmonic mean of precision and recall.

    beta weights recall relative to precision (beta=1 gives F1).
    Raises ValueError for negative beta.
    """
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    # Uses round_choice for consistency with precision()/recall(), which
    # threshold at global_mid rather than via np.round (banker's rounding).
    if np.sum(round_choice(np.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    # Epsilon keeps the score defined when both p and r are zero.
    return (1 + bb) * (p * r) / (bb * p + r + 1e-6)


def fmeasure(p, r):
    """F-measure (F1): harmonic mean of precision p and recall r.

    The epsilon in the denominator keeps the result defined (0.0) when
    both p and r are zero.
    """
    denominator = p + r + 1e-6
    return 2 * (p * r) / denominator


def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    Implemented as exp(-log(1 + exp(-x))) via np.logaddexp: the naive
    form np.exp(-x) overflows to inf (with a RuntimeWarning) for
    x < ~-709, while logaddexp stays finite for all inputs.  Works on
    scalars and arrays alike.
    """
    return np.exp(-np.logaddexp(0, -x))

# Shared decision threshold for binarizing scores.
global_mid = 0.5

def round_choice(x):
    """Binarize x in place against the module-level threshold global_mid.

    Entries >= global_mid become 1, entries < global_mid become 0
    (values equal to the threshold count as positive).  Mutates x through
    boolean-mask assignment and returns it.
    """
    threshold = global_mid
    x[x >= threshold] = 1
    x[x < threshold] = 0
    return x

def round_choice_submit(x):
    """Binarize each row of the 2-D array x in place for submission.

    Each row is thresholded at global_mid, except that a row whose maximum
    score falls below the threshold uses its own maximum instead, so every
    row keeps at least one positive entry.  Returns the mutated x.
    """
    for i in range(x.shape[0]):
        row = x[i]
        # Recompute the threshold for EVERY row.  The previous version
        # initialized `mid` once before the loop, so a row with a low
        # maximum permanently lowered the threshold for all later rows.
        mid = min(global_mid, np.max(row))
        # Values equal to the threshold count as positive (guarantees the
        # row max survives when it set the threshold).
        row[row >= mid] = 1
        row[row < mid] = 0
    return x