# -*- coding: utf-8 -*-
"""
Created on Mon Oct  8 14:32:52 2018

@author: luolei


"""
from scipy.ndimage.interpolation import shift
import numpy as np


def cal_stat_params(series):
	"""
	Compute descriptive statistics for a 1-D data series.

	:param series: array-like of numbers
	:return: dict with keys 'mean', 'std' and 'percentiles', the latter
		holding 'q1', 'q2', 'q3' and 'iqr'
	"""
	series = np.asarray(series)

	# Mean and (population) standard deviation.
	mean = np.mean(series)
	std = np.std(series)

	# Quartiles. 'method' is the current name of the keyword that was
	# called 'interpolation' before NumPy 1.22 (the old name is deprecated).
	q1, q2, q3 = np.percentile(series, (25, 50, 75), method = 'midpoint')
	iqr = abs(q3 - q1)

	# Collect the results
	stat_params = {
		'mean': mean,
		'std': std,
		'percentiles': {
			'q1': q1,   # lower quartile
			'q2': q2,   # median
			'q3': q3,   # upper quartile
			'iqr': iqr  # interquartile range
		}
	}
	return stat_params


def get_binning_range(stat_params):
	"""Derive the binning bounds (Tukey fences, q1/q3 ± 1.5·IQR) from the stats dict."""
	pct = stat_params['percentiles']
	lower = pct['q1'] - 1.5 * pct['iqr']
	upper = pct['q3'] + 1.5 * pct['iqr']
	return [lower, upper]


def series_binning(data, col, bins):
	"""
	Bin one variable of a DataFrame.
	:param data: pd.DataFrame, source of the data to bin
	:param col: str, column selected for binning
	:param bins: int, number of bins
	:return:
		freq_ns: frequencies per bin
		labels: right bin edges, used as bin labels
	"""
	# Pull the selected column out as a flat 1-D array.
	series = data.copy()[[col]].to_numpy().flatten()

	# Derive the binning bounds from the column's statistics.
	bounds = get_binning_range(cal_stat_params(series))

	# Histogram the values; dropping the leftmost edge labels each bin
	# by its right edge and keeps labels the same length as freq_ns.
	freq_ns, edges = np.histogram(series, bins, range = bounds)
	labels = edges[1:]
	return freq_ns, labels


def joint_2d_binning(data, col_a, col_b, bins):
	"""
	Joint (2-D) binning of two DataFrame columns.
	:param data: pd.DataFrame, source of the data to bin
	:param col_a: str, first column selected for binning
	:param col_b: str, second column selected for binning
	:param bins: list of ints, bin counts per column, e.g. [bins_a, bins_b]
	:return:
		H: np.ndarray, 2-D frequency table
		x_edges: np.ndarray, bin labels (right edges) for col_a
		y_edges: np.ndarray, bin labels (right edges) for col_b
	"""
	# Extract the two columns as a (n, 2) array.
	values = data.copy()[[col_a, col_b]].to_numpy()
	series_a = values[:, 0]
	series_b = values[:, 1]

	# Per-column binning bounds, each derived from that column's statistics.
	ranges = [
		get_binning_range(cal_stat_params(s))
		for s in (series_a, series_b)
	]

	# 2-D histogram over the joint range.
	H, x_edges, y_edges = np.histogram2d(
		series_a,
		series_b,
		bins = bins,
		range = ranges
	)

	# Drop the leftmost edge on each axis so edge counts match bin counts.
	return H, x_edges[1:], y_edges[1:]


def probability(freq_ns):
	"""
	Normalize a frequency table into probabilities.

	:param freq_ns: array-like of non-negative counts (any shape)
	:return: np.ndarray of the same shape, summing to 1
	"""
	# asarray accepts plain lists as well as ndarrays; true division
	# always returns a new float array, so no explicit copy is needed.
	freqs = np.asarray(freq_ns)
	return freqs / np.sum(freqs)


def univar_entropy(freq_ns):
	"""
	Shannon entropy (natural log) of a single variable's frequency table.

	:param freq_ns: 1-D array of bin counts
	:return: float, entropy in nats
	"""
	# Small epsilon keeps log() finite for empty bins (p == 0).
	eps = 1e-6
	# probability() does not mutate its argument, so the defensive
	# copy the original code made here was redundant.
	probs = probability(freq_ns)
	log_probs = np.log(probs + eps)
	entropy = - np.dot(probs, log_probs)
	return entropy


def joint_2d_entropy(H):
	"""
	Shannon entropy (natural log) of a 2-D joint frequency table.

	:param H: 2-D array of joint bin counts
	:return: float, joint entropy in nats
	"""
	# Small epsilon keeps log() finite for empty bins (p == 0).
	eps = 1e-6
	# probability() does not mutate its argument, so the defensive
	# copy the original code made here was redundant.
	probs = probability(H)
	log_probs = np.log(probs + eps)
	joint_entropy = - np.sum(np.multiply(probs, log_probs))
	return joint_entropy


def info_entropy(data, col_a, col_b, bins):
	"""
	Information-entropy measure of two columns: H(a) + H(b) - H(a, b).

	:param data: pd.DataFrame, source data
	:param col_a: str, first column
	:param col_b: str, second column
	:param bins: list of ints, bin counts as [bins_a, bins_b]
	:return: float
	"""
	# Marginal frequency tables (the bin labels are not needed here).
	freq_ns_a, _ = series_binning(data, col_a, bins[0])
	freq_ns_b, _ = series_binning(data, col_b, bins[1])
	# Joint frequency table.
	H, _, _ = joint_2d_binning(data, col_a, col_b, bins)

	# Combine marginal and joint entropies.
	ie = univar_entropy(freq_ns_a) + univar_entropy(freq_ns_b) - joint_2d_entropy(H)
	return ie


def time_delay_ie_test(data, col_a, col_b, bins, max_lag, lag_step = 1):
	"""
	Time-delayed information-entropy scan.

	For each lag in [-max_lag, max_lag] (stepped by lag_step), shift col_b
	by the lag and compute the information entropy against col_a.

	:param data: pd.DataFrame, source data
	:param col_a: str, reference column
	:param col_b: str, column that gets time-shifted
	:param bins: list of ints, bin counts as [bins_a, bins_b]
	:param max_lag: int, maximum absolute lag to test
	:param lag_step: int, step between tested lags (default 1)
	:return: dict mapping lag -> information entropy
	"""
	td_ie_results = {}
	# BUGFIX: lag_step is now used as the range step; previously it only
	# stretched the upper bound while the loop always stepped by 1.
	# Behavior for the default lag_step = 1 is unchanged.
	for lag in range(-max_lag, max_lag + 1, lag_step):
		data_lagged = data.copy()

		if col_a == col_b:
			# Self-comparison: duplicate the column under distinct names so
			# one copy can be shifted independently of the other.  Using
			# local names avoids the original's mutate-then-restore of the
			# caller-supplied column names.
			name_a, name_b = col_a + '_x', col_b + '_y'
			data_lagged.rename(columns = {col_a: name_a}, inplace = True)
			data_lagged[name_b] = data_lagged[name_a]
		else:
			name_a, name_b = col_a, col_b

		# scipy's shift pads the vacated positions with zeros by default.
		data_lagged[name_b] = shift(data_lagged[name_b], lag)
		td_ie_results[lag] = info_entropy(data_lagged, name_a, name_b, bins)

	return td_ie_results


