import pandas as pd
import numpy as np
from scipy.special import expit  # sigmoid


def ensembleNormalizedRank(list_of_scores, weights=None):
    '''
    Fuse several score lists by rank-normalizing each one, then averaging.

    Each input list is ranked independently (ties receive the average rank,
    pandas' default) and divided by its length, so normalized values lie in
    (0, 1].  The normalized ranks are then combined as a (weighted) mean.

    Example:
        list_of_scores:
        [
            [-1, 2, 10],
            [20, 30, 40],
        ]
        is rank-normalized to =>
        [
            [0.33333333, 0.66666667, 1.0],
            [0.33333333, 0.66666667, 1.0],
        ]
        then averaged, giving:
        np.array([0.33333333, 0.66666667, 1.0])

    Args:
        list_of_scores: sequence of equal-length score sequences, one per model.
        weights: optional per-model weights (same length as list_of_scores);
            None means all models weigh equally.

    Returns:
        np.ndarray of fused scores, one value per sample.
    '''
    # One column per input score list; DataFrame.rank() ranks column-wise.
    all_scores = pd.DataFrame(np.stack(list_of_scores, axis=1))
    if weights is None:
        # Equal weighting is just the weighted path with all-ones weights.
        weights = np.ones(all_scores.shape[1])
    else:
        weights = np.asarray(weights)
        assert len(weights) == all_scores.shape[1]
    # rank()/n maps each column into (0, 1]; ties get the average rank.
    normalized = all_scores.rank() / all_scores.shape[0]
    return ((normalized * weights).sum(axis=1) / weights.sum()).values


def ensembleLogits(list_of_scores, alpha=1e-2, weights=None):
    '''
    假设输入的是概率（在0~1）之间。会先将他转换成logits。然后对logits进行融合。
    融合时会根据logits的方差自动加上权重。
    alpha是一个参数，alpha越小，就越是原始的logits。
    参考：https://www.kaggle.com/aharless/victor-s-mix-with-logits-now-regularized

    输入：
        list_of_scores:
        [
            [0.1, 0.2, 0.3],
            [0.1, 0.9, 0.3],
        ]
    输出：
        array([ 0.10996026,  0.35923765,  0.30127895])
    '''
    all_scores = pd.DataFrame(np.stack(list_of_scores, axis=1))
    logits = all_scores.applymap(lambda x: np.log(x / (1 - x)))  # 概率 -> logits
    logits *= 1 - (alpha * (logits**2))  # 不知道是什么意思，某种修正？当alpha很小时这项无效
    stdevs = logits.std()
    w = .2 / stdevs
    if weights is None:
        wa = (w * logits).sum(axis=1) / w.sum()
        result = wa.apply(expit).values
    else:
        weights = np.asarray(weights)
        assert len(weights) == all_scores.shape[1]
        wa = (weights * (w * logits)).sum(axis=1) / ((w * weights).sum())  # w和weights都是权重
        result = wa.apply(expit).values
    return result
