# coding:utf8
from collections import defaultdict

import numpy as np
import random
import pandas as pd
from sklearn.model_selection import train_test_split
def get_dataSet(random_state=None):
    """
    Build a synthetic binary-classification dataset.

    :param random_state: optional seed forwarded to sklearn so runs are
        reproducible (default None keeps the original random behavior)
    :return: ndarray of shape (200, 101) — 100 feature columns with the
        class label appended as the last column, matching the layout the
        rest of this module expects
    """
    from sklearn.datasets import make_classification
    dataSet, classLabel = make_classification(n_samples=200,
                                              n_features=100,
                                              n_classes=2,
                                              random_state=random_state)
    # stick the labels on as the final column
    return np.concatenate((dataSet, classLabel.reshape((-1, 1))), axis=1)
def split_K_Dataset(dataSet, n_folds):
    """
    Split dataSet into n_folds consecutive row folds.

    Rows left over after integer division are dropped.

    :param dataSet: 2-D array of samples
    :param n_folds: number of folds to produce
    :return: list of n_folds row-slices of dataSet
    """
    # BUG FIX: the original used '/', whose float result is an invalid
    # slice index under Python 3 — integer division is required.
    fold_size = len(dataSet) // n_folds
    K_dataSet = list()
    for k in range(n_folds):
        K_dataSet.append(dataSet[k * fold_size:(k + 1) * fold_size, :])
    return K_dataSet

# Classification
def most_to_split(dataSet):
    """Return the majority class label (0 or 1) of the last column; ties give 1."""
    zeros = len(np.nonzero(dataSet[:, -1] == 0)[0])
    ones = len(np.nonzero(dataSet[:, -1] == 1)[0])
    return 0 if zeros > ones else 1

def select_n_sample(dataSet, n):
    """
    Build n bootstrap subsets of dataSet.

    Each subset has the same number of rows as dataSet, drawn uniformly
    at random WITH replacement.

    :param dataSet: 2-D array of samples
    :param n: number of subsets to build
    :return: list of n arrays shaped like dataSet
    """
    size = len(dataSet)
    subsets = []
    for _ in range(n):
        rows = [np.random.randint(size) for _ in range(size)]
        subsets.append(dataSet[rows, :])
    return subsets

# Used for regression: variance-based error
# (regression targets are continuous values)
def cal_regErr(dataSet: np.ndarray):
    """Total squared error of the labels: variance of the last column times row count."""
    n_rows = np.shape(dataSet)[0]
    return np.var(dataSet[:, -1]) * n_rows
# Mean of the labels — used as the regression leaf value
def cal_mean(dataSet: np.ndarray):
    """Mean of the label column (regression leaf prediction)."""
    labels = dataSet[:, -1]
    return np.mean(labels)
def bin_split_Dataset(dataSet, ax: int, value):
    """
    Binary split of dataSet on one feature column.

    :param dataSet: 2-D array of samples
    :param ax: index of the feature column to split on
    :param value: split threshold
    :return: (rows where dataSet[:, ax] <= value, rows where dataSet[:, ax] > value)
    """
    # boolean-mask indexing selects the matching rows directly
    left = dataSet[dataSet[:, ax] <= value, :]
    right = dataSet[dataSet[:, ax] > value, :]
    return left, right
def cal_gini(dataSet: np.ndarray):
    """
    Gini impurity of the labels in the last column: 1 - sum(p_k ** 2).

    :param dataSet: 2-D array of samples, class label in the last column
    :return: impurity in [0, 1); 0 means the set is pure
    """
    total = len(dataSet)
    counts = defaultdict(int)
    for row in dataSet:
        counts[row[-1]] += 1
    squared_props = sum((float(cnt) / total) ** 2 for cnt in counts.values())
    return 1 - squared_props


def chooseBestfeatureTosplit(dataSet: np.ndarray, m, alpha='huigui'):
    """
    Pick the best (feature, value) binary split among m randomly sampled features.

    For regression (alpha == 'huigui') the criterion is the total squared
    error of the two children; otherwise it is the sample-weighted Gini
    impurity of the two children.

    :param dataSet: 2-D array of samples, label in the last column
    :param m: number of candidate features to sample
    :param alpha: 'huigui' (regression) or anything else (classification)
    :return: (feature_index, split_value), or (None, leaf_value) when no
             split improves the parent criterion by at least 0.001
    """
    num_sample = len(dataSet)
    n_features = dataSet.shape[1] - 1
    best_score = float('inf')
    index_feature = 0
    value_feature = 0

    # BUG FIX: the original drew features WITH replacement, so the same
    # feature could be evaluated several times and others never considered.
    features = random.sample(range(n_features), min(m, n_features))

    if alpha == "huigui":
        S = cal_regErr(dataSet)
    else:  # classification
        S = cal_gini(dataSet)

    for feature in features:
        for value in set(dataSet[:, feature]):
            ds1, ds2 = bin_split_Dataset(dataSet, feature, value)
            # a one-sided "split" carries no information; it also made
            # cal_regErr return NaN on the empty half
            if len(ds1) == 0 or len(ds2) == 0:
                continue
            if alpha == "huigui":
                newS = cal_regErr(ds1) + cal_regErr(ds2)
            else:
                # BUG FIX: weight each child's Gini by its sample share —
                # the original unweighted sum (range [0, 2]) is not
                # comparable to the parent impurity S (range [0, 1]).
                newS = (len(ds1) * cal_gini(ds1)
                        + len(ds2) * cal_gini(ds2)) / num_sample
            if newS < best_score:
                best_score = newS
                index_feature = feature
                value_feature = value

    # improvement too small: stop splitting and return a leaf value
    if S - best_score < 0.001:
        if alpha == 'huigui':
            return None, cal_mean(dataSet)
        return None, most_to_split(dataSet)
    return index_feature, value_feature

def createTree(dataSet, m, max_level=10, alpha="huigui"):
    """
    Recursively grow a CART tree.

    :param dataSet: 2-D array of samples, label in the last column
    :param m: number of random candidate features per split
    :param max_level: remaining recursion depth
    :param alpha: 'huigui' (regression) or anything else (classification)
    :return: nested dict with keys 'feature', 'feature_value', 'left',
             'right' — or a bare leaf value (mean / majority label)
    """
    # BUG FIX: when depth is exhausted or the node is too small the
    # original returned None / a raw data row; a usable tree needs a
    # proper leaf prediction here.
    if max_level <= 0 or len(dataSet) <= 1:
        return cal_mean(dataSet) if alpha == "huigui" else most_to_split(dataSet)

    Bestfeature, value_feature = chooseBestfeatureTosplit(dataSet, m, alpha)
    if Bestfeature is None:
        return value_feature

    left_ds, right_ds = bin_split_Dataset(dataSet, Bestfeature, value_feature)
    tree = {
        'feature': Bestfeature,
        'feature_value': value_feature,
        # BUG FIX: forward alpha — the original recursion omitted it, so
        # classification trees silently grew regression subtrees.
        'left': createTree(left_ds, m, max_level - 1, alpha),
        'right': createTree(right_ds, m, max_level - 1, alpha),
    }
    return tree

# Prediction with a single tree
def treeForcast(dataSet, m, data):
    """
    Build a tree from dataSet and predict the value/label for one sample.

    :param dataSet: 2-D training array, label in the last column
    :param m: number of random candidate features per split
    :param data: single feature vector to predict
    :return: the leaf prediction reached by walking the tree
    """
    tree = createTree(dataSet, m)

    def sinForcast(node, sample):
        # leaves are stored as plain values, not dicts
        if not isinstance(node, dict):
            return node
        feature = node.get('feature', 0)
        # BUG FIX: use <= to match bin_split_Dataset's training-time split
        # (the original used <, mis-routing boundary samples), and RETURN
        # the recursive result — the original dropped it, so any non-leaf
        # tree always predicted None.
        if sample[feature] <= node['feature_value']:
            return sinForcast(node['left'], sample)
        return sinForcast(node['right'], sample)

    return sinForcast(tree, data)


if __name__ == "__main__":
    # smoke-run: build the synthetic dataset
    dataset = get_dataSet()