import pandas as pd
import numpy as np
import collections
import random
from sklearn.metrics import r2_score
import pickle

# Maps feature name -> feature kind (numeric vs. categorical split handling).
# NOTE(review): never written anywhere in this file — presumably populated by
# external code before training; confirm before relying on it.
feature_type = {}

class TreeNode():
    """One node of a regression decision tree.

    Internal nodes hold a split rule — either a numeric threshold
    (``split_value``) or a categorical value -> child-index mapping
    (``feature2son``); only leaf nodes carry a ``label``.
    """

    def __init__(self):
        self.next_node = []             # child TreeNodes
        self.son_Num = 0                # number of children
        self.feature2son = {}           # categorical value -> child index
        self.split_value = None         # numeric threshold (numeric splits only)
        self.split_feature = None       # feature this node splits on
        self.label = None               # only leaf node has label

    def get_label(self, item):
        """Route *item* down the subtree and return the leaf's label.

        @Param item: mapping from feature name to value (dict / pandas row)
        """
        if self.label is not None:      # leaf: prediction is stored here
            return self.label
        if self.split_value is not None:
            # Numeric split: left child for <= threshold, right otherwise.
            if item[self.split_feature] <= self.split_value:
                return self.next_node[0].get_label(item)
            else:
                return self.next_node[1].get_label(item)
        else:
            # Categorical split.
            # BUG FIX: the original looked up feature2son with the feature
            # *name* (always a KeyError) and returned the child node object
            # instead of recursing; index with the feature *value* and
            # continue the descent.
            feature = item[self.split_feature]
            son_id = self.feature2son[feature]
            return self.next_node[son_id].get_label(item)

    @staticmethod
    def calc_leaf_value(label):
        """Leaf value for regression: the mean of the samples' labels.

        (The original comment said "most frequent class"; this tree predicts
        a continuous target, so the mean is used.)
        """
        return label.mean()

class Decision_Tree():
    """CART-style regression tree.

    Every split is a binary numeric split (``feature <= value``), chosen to
    minimise the sum of the two children's mean squared errors.  Labels are
    expected as a DataFrame whose target column is named ``'charges'``.
    """

    def __init__(
        self, max_depth=-1, min_samples_split=2,
        min_samples_leaf=1, min_split_gain=0.0,
        random_state=2
    ):
        """
        @Param max_depth:           depth limit; -1 means unlimited
        @Param min_samples_split:   minimum samples a node needs to split
        @Param min_samples_leaf:    minimum samples required in each child
        @Param min_split_gain:      minimum gain to split
                                    NOTE(review): stored but never used below
        @Param random_state:        random seed (stored, not used by this class)
        """
        self.root = None
        self.real_depth = 0             # deepest level actually reached by fit
        self.max_depth = max_depth if max_depth != -1 else float('inf')
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_split_gain = min_split_gain
        self.random_state = random_state

    def fit(self, items, labels, depth):
        """Recursively grow a subtree from (items, labels).

        @Param items:  DataFrame of candidate split features
        @Param labels: DataFrame whose 'charges' column is the target
        @Param depth:  current recursion depth — callers pass 0; the root
                       node is remembered on self.root when depth == 0
        @Return the TreeNode rooting the grown subtree
        """
        tree_node = TreeNode()
        self.real_depth = max(self.real_depth, depth)
        # Stop growing: depth limit reached, too few samples to split,
        # or only one feature column left.
        if (depth >= self.max_depth) or (len(labels) <= self.min_samples_split) \
                or (len(items.columns) <= 1):
            tree_node.label = tree_node.calc_leaf_value(labels['charges'])
        else:
            [(items_1, label_1), (items_2, label_2)], gold_feature, gold_val, gold_entrophy \
                = self.find(items, labels)
            # BUG FIX: test gold_feature first — when no valid split exists
            # the child frames are None and len() would raise.
            if gold_feature is None \
                    or len(label_1) < self.min_samples_leaf \
                    or len(label_2) < self.min_samples_leaf:
                tree_node.label = tree_node.calc_leaf_value(labels['charges'])
            else:
                tree_node.next_node = [
                    self.fit(items_1, label_1, depth + 1),
                    self.fit(items_2, label_2, depth + 1),
                ]
                tree_node.split_feature = gold_feature
                tree_node.split_value = gold_val

        if depth == 0:
            self.root = tree_node
        return tree_node

    def find(self, items, labels):
        """Exhaustively search all (feature, value) pairs for the best split.

        @Return ([(items_left, labels_left), (items_right, labels_right)],
                 best_feature, best_value, best_loss);
                 best_feature is None (and the frames are None) when no
                 candidate satisfies min_samples_leaf on both sides.
        """
        gold_feature = None
        gold_val = None                 # all splits are numeric thresholds
        gold_entrophy = float('inf')
        items_1 = label_1 = None
        items_2 = label_2 = None

        for feature in items.columns:
            for val in sorted(items[feature].unique().tolist()):
                label_left = labels[items[feature] <= val]
                label_right = labels[items[feature] > val]
                # Both children must keep at least min_samples_leaf samples.
                if len(label_left) < self.min_samples_leaf \
                        or len(label_right) < self.min_samples_leaf:
                    continue
                # Unweighted sum of the children's MSEs — kept as in the
                # original (a sample-weighted sum would be standard CART;
                # the original computed the weights but never used them).
                entrophy = self.cal_loss(label_left) + self.cal_loss(label_right)
                # Strict '<' keeps the first-found minimum (earlier feature /
                # smaller value wins ties), matching the original behaviour.
                if entrophy < gold_entrophy:
                    gold_feature, gold_val, gold_entrophy = feature, val, entrophy

        if gold_feature is not None:
            label_1 = labels[items[gold_feature] <= gold_val]
            items_1 = items[items[gold_feature] <= gold_val]
            label_2 = labels[items[gold_feature] > gold_val]
            items_2 = items[items[gold_feature] > gold_val]
        return [(items_1, label_1), (items_2, label_2)], gold_feature, gold_val, gold_entrophy

    @staticmethod
    def cal_loss(label):
        """Mean squared deviation from the mean of *label*'s first column.

        Vectorised replacement for the original per-row Python loop; the
        original likewise only used the first column (``loss[0] / n``).
        @Param label: non-empty DataFrame (callers guarantee >= 1 row)
        """
        col = label.iloc[:, 0].to_numpy(dtype=float)
        return float(((col - col.mean()) ** 2).mean())

    def get_label(self, item):
        """Predict for one row (mapping feature name -> value)."""
        return self.root.get_label(item)

    def predict(self, items):
        """Predict one value per row of *items*; returns a plain list."""
        return [self.get_label(row) for _, row in items.iterrows()]


class RandomForest_Classifier():   # despite the name, this is a regressor
    """Random forest regressor (class name kept for backward compatibility).

    Averages the predictions of ``n_estimators`` Decision_Tree regressors,
    each fit on a bootstrap sample of the rows and a random subset of the
    feature columns.
    """

    def __init__(
        self, n_estimators=20, max_depth=-1, min_samples_split=2,
        min_samples_leaf=1, min_split_gain=0.0,
        sample_rate=0.6, feature_rate=0.5, random_state=2
    ):
        """
        @Param n_estimators:        number of decision trees in the forest
        @Param max_depth:           tree depth limit; -1 means unlimited
        @Param min_samples_split:   minimum samples a node needs to split
        @Param min_samples_leaf:    minimum samples per leaf
        @Param min_split_gain:      minimum gain required to split a node
        @Param random_state:        seed — fixes the n_estimators bootstrap
                                    samples so experiments are reproducible
        @Param sample_rate:         fraction of rows drawn per bootstrap sample
        @Param feature_rate:        fraction of feature columns drawn per tree
        """
        self.n_estimators = n_estimators
        self.max_depth = max_depth if max_depth != -1 else float('inf')
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_split_gain = min_split_gain
        self.random_state = random_state
        self.trees = []
        self.feature_importances_ = dict()   # NOTE(review): never populated
        self.sample_rate = sample_rate
        self.feature_rate = feature_rate
        if random_state is not None:
            random.seed(self.random_state)

    def get_bootstrap_data(self, items, labels):
        """Yield n_estimators (items, labels) bootstrap samples.

        NOTE(review): row positions are looked up with ``.loc``, which is
        only correct when *items*/*labels* use the default RangeIndex —
        confirm at the call site (both frames are reset_index'd on yield).
        """
        # max(1, ...) guards against degenerate 0-sized samples on tiny inputs.
        self.feature_num = max(1, int(self.feature_rate * len(items.columns)))
        self.item_num = max(1, int(self.sample_rate * len(items)))

        for _ in range(self.n_estimators):
            # BUG FIX: bootstrap sampling draws *with* replacement; the
            # original used random.sample (without replacement).
            random_item_indexes = random.choices(range(len(items)), k=self.item_num)
            # Feature subsets stay without replacement (distinct columns).
            random_feature_indexes = random.sample(items.columns.tolist(), self.feature_num)

            items_random = items.loc[random_item_indexes, random_feature_indexes].reset_index(drop=True)
            label_random = labels.loc[random_item_indexes].reset_index(drop=True)
            yield items_random, label_random

    def fit(self, items, labels):
        """Fit one Decision_Tree per bootstrap sample of (items, labels).

        @Param items:  DataFrame of features
        @Param labels: DataFrame with a 'charges' target column
        """
        for items_random, labels_random in self.get_bootstrap_data(items, labels):
            tree = Decision_Tree(
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_split_gain=self.min_split_gain,
                random_state=self.random_state
            )
            tree.fit(items_random, labels_random, 0)
            self.trees.append(tree)

    def score(self, items, labels):
        """Return the R-square score of the forest's predictions."""
        pred = self.predict(items)
        return r2_score(np.array(labels['charges'].tolist()), pred)

    def predict(self, items):
        """Average the trees' predictions for every row of *items*.

        @Return 1-D numpy array, one prediction per row
        """
        pred = []
        for _, item in items.iterrows():
            tree_votes = [tree.get_label(item) for tree in self.trees]
            pred.append(np.mean(tree_votes))
        return np.array(pred)
