import logging
import pandas as pd
import numpy as np
import random
import copy
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# Root-logger config: timestamped INFO-level messages (used to report tree-building progress).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class Random_Forest():
    """A simple random-forest regressor built from hand-rolled CART-style trees.

    Parameters
    ----------
    tree_num : int
        Number of trees in the forest.
    b_category : list[list[str]]
        Per-tree column-name lists; column 0 is the target name 'MEDV'.
    bootstrapping : list[list[list[float]]]
        Per-tree bootstrap samples of the training rows (target at index 0).
    b_test : list
        Per-tree test sets (stored for reference; not used while building).
    """

    def __init__(self, tree_num, b_category, bootstrapping, b_test):
        self.tree_num = tree_num
        self.category = b_category
        self.bootstrapping = bootstrapping
        self.b_test = b_test
        self.my_tree = self.creat_forest()

    def creat_forest(self):
        """Build one tree per bootstrap sample; return the list of root Nodes."""
        my_tree = []
        for i in range(self.tree_num):
            logger.info(f"创建第{i + 1}棵树")  # "building tree #<i+1>"
            my_tree.append(self.creat_tree(self.bootstrapping[i], self.category[i]))
        return my_tree

    def creat_tree(self, data, labels, feature_labels=None, best_feature_medv_mean=None):
        """Recursively grow a regression tree and return its root Node.

        data : list of rows; column 0 is the target (MEDV), the rest features.
        labels : column names aligned with the rows ('MEDV' first).
        feature_labels : accumulator of chosen split thresholds (diagnostic only).
        best_feature_medv_mean : parent node's mean target, used as the leaf
            value when a branch receives no samples.

        NOTE: rows in ``data`` are mutated in place (the split column is
        deleted at each level) — callers must pass data they own.
        """
        # Fix: original used a mutable default ([]) shared across every call
        # and every tree in the forest.
        if feature_labels is None:
            feature_labels = []
        data_medv = [row[0] for row in data]
        # Stopping case 1: empty branch — fall back to the parent's target mean.
        if len(data) == 0:
            return Node(best_feature_medv_mean)
        # Stopping case 2: single sample — its own target becomes the leaf.
        if len(data) == 1:
            return Node(data[0][0])
        # Stopping case 3: targets nearly homogeneous — stop and return the mean.
        df = pd.DataFrame(data)
        if np.var(df.iloc[:, 0]) < 0.025:
            return Node(np.mean(df.iloc[:, 0]))  # was .loc[:, 0]; same int-labeled column
        if data_medv.count(data_medv[0]) == len(data_medv):
            return Node(data_medv[0])
        # Stopping case 4: only target + one feature column remain — leaf mean.
        if len(data[0]) == 2:
            return Node(self.regLeaf(data))
        # Choose the best (feature, threshold) pair for this node.
        best_feature = self.get_best_feature(data, labels)
        # maxsplit=1 so a negative threshold ('A--1.5') still unpacks correctly
        best_feature_name, best_feature_point = best_feature[0].split('-', 1)
        best_feature_medv_mean = best_feature[1]
        feature_labels.append(best_feature_point)
        node = Node(f'{best_feature_name}={best_feature_point}')
        # Partition rows on the threshold, dropping the used column in place.
        col = labels.index(best_feature_name)  # hoisted loop-invariant lookup
        threshold = float(best_feature_point)
        ldata = []
        rdata = []
        for d in data:
            value = d[col]
            del d[col]
            if value < threshold:
                ldata.append(d)
            else:
                rdata.append(d)
        labels2 = copy.deepcopy(labels)
        labels2.remove(best_feature_name)
        node.lchild = self.creat_tree(ldata, labels2, feature_labels, best_feature_medv_mean)
        node.rchild = self.creat_tree(rdata, labels2, feature_labels, best_feature_medv_mean)
        return node

    # Mean of the target column; used as the regression value at a leaf.
    def regLeaf(self, data):
        df = pd.DataFrame(data)
        return np.mean(df.iloc[:, 0])

    def get_best_feature(self, data, labels):
        """Pick the feature/threshold pair with the lowest split variance.

        Candidate thresholds are the nine interior deciles of each feature's
        range. Returns ``[f'{name}-{threshold}', mean_of_MEDV]`` where the
        second element serves as this node's leaf-fallback value.
        """
        df = pd.DataFrame(data, columns=labels)
        tree_feature_mse = {}
        for i in range(1, len(labels)):  # column 0 is the target; skip it
            stats = df[labels[i]].describe().to_dict()
            lo = stats["min"]  # renamed from min/max: do not shadow builtins
            hi = stats['max']
            part_length = (hi - lo) / 10
            all_points = [lo + j * part_length for j in range(1, 10)]
            cur_feature_mse = []
            for point in all_points:
                left = df[df[labels[i]] <= point]
                right = df[df[labels[i]] > point]
                # total within-branch variance of the target after this split
                cur_point_mse = np.var(left['MEDV']) + np.var(right['MEDV'])
                cur_feature_mse.append(cur_point_mse)
            best_point = all_points[(np.array(cur_feature_mse)).argmin()]
            tree_feature_mse[f'{labels[i]}-{best_point}'] = np.min(cur_feature_mse)
        ranked = sorted(tree_feature_mse.items(), key=lambda x: x[1])
        return [ranked[0][0], np.mean(df['MEDV'])]


class Node(object):
    """Binary-tree node: internal nodes carry 'feature=threshold' strings,
    leaves carry numeric regression values."""

    def __init__(self, item):
        # payload plus two initially empty children
        self.name, self.lchild, self.rchild = item, None, None


def breadth_travel(tree):
    """Breadth-first traversal; prints node names on one line (for display)."""
    pending = [tree]
    cursor = 0
    # index cursor instead of pop(0): same visit order, no list shifting
    while cursor < len(pending):
        node = pending[cursor]
        cursor += 1
        print(node.name, end=" ")
        for child in (node.lchild, node.rchild):
            if child is not None:
                pending.append(child)
    print('\n')


class Evaluate:
    """Prediction and scoring helpers for the hand-built forest."""

    def __init__(self):  # fixed: was misspelled as '__int__'
        pass

    def prediction(self, t_tree, test, labels):
        """Run every row of ``test`` down the tree ``t_tree``; return leaf values.

        labels : column names aligned with the rows ('MEDV' first).
        NOTE: rows of ``test`` are mutated in place (used columns are deleted),
        mirroring how the tree consumed columns while it was built.
        """
        result = []
        for data in test:
            l = copy.deepcopy(labels)
            tree = t_tree
            for i in range(len(labels)):
                j = 1
                flag = 0
                # Find which remaining column this internal node splits on;
                # a non-string node name means we reached a leaf value.
                while j:
                    if type(tree.name) != str:
                        result.append(tree.name)
                        flag = 1
                        break
                    if tree.name.split('=')[0] == l[j]:
                        break
                    j += 1
                if flag == 1:
                    break
                # Descend left/right on the threshold, then drop the column.
                if data[j] < float(tree.name.split('=')[1]):
                    tree = tree.lchild
                else:
                    tree = tree.rchild
                del (l[j])
                del (data[j])
        return result

    def new_pre(self, t_test, labels, tree):
        """Average per-tree predictions into forest predictions.

        t_test / labels / tree are parallel per-tree lists; returns one mean
        prediction per test row.
        """
        r = []
        for i in range(len(t_test)):
            # each tree saw its own feature subset, and prediction mutates the
            # label list, so give every tree a fresh copy
            label = copy.deepcopy(labels[i])
            r.append(self.prediction(tree[i], t_test[i], label))
        # transpose so rr[row] holds one prediction per tree
        rr = [[] for _ in range(len(r[0]))]
        for i in range(len(rr)):
            for j in range(len(r)):
                rr[i].append(r[j][i])
        return [np.mean(row) for row in rr]

    def eveal(self, y_test, predict):
        """R^2 of the predictions; ``y_test`` rows carry the target at index 0.

        (Name kept as-is — misspelling of 'eval' — to preserve the public API.)
        """
        y_test = [i[0] for i in y_test]
        score = r2_score(y_true=y_test, y_pred=predict)
        return score


def loadDataset(tree_num, del_feature_num):
    """Build per-tree bootstrap training sets, feature lists and test sets.

    tree_num : number of trees (one bootstrap sample + feature subset each).
    del_feature_num : how many randomly chosen features to drop per tree.
    Returns (b_category, bootstrapping, b_test) — parallel per-tree lists.

    NOTE(review): ``datasets.load_boston`` was removed in scikit-learn 1.2;
    this function only runs against older scikit-learn versions — confirm the
    pinned version before reuse.
    """
    boston = datasets.load_boston()
    feature = boston.data
    target = boston.target
    target = target.reshape((-1, 1))
    # column 0 becomes the target ('MEDV'), the remaining columns the features
    data_set = np.concatenate((target, feature), axis=1)
    data_set = data_set.tolist()
    data_set, test_set = train_test_split(data_set, test_size=0.2)
    category = boston.feature_names.tolist()
    category.insert(0, 'MEDV')

    bootstrapping = []
    b_category = []
    b_test = []
    for i in range(tree_num):
        b_category.append(copy.deepcopy(category))
        b_test.append(copy.deepcopy(test_set))
        bootstrapping.append([])
        # bootstrap: draw len(data_set) rows uniformly with replacement
        for j in range(len(data_set)):
            num = int(np.floor(np.random.random() * len(data_set)))
            sampled_row = data_set[num]  # renamed: was shadowing builtin `set`
            bootstrapping[i].append(copy.deepcopy(sampled_row))

    # per tree, pick `del_feature_num` distinct feature indices (never column 0)
    n_num_category = []
    for i in range(tree_num):
        n_num_category.append(random.sample(range(1, len(category)), del_feature_num))

    for i in range(tree_num):
        # Two passes: mark dropped columns with a sentinel first, then remove.
        # Deleting directly would shift indices and invalidate n_num_category.
        for j in range(del_feature_num):
            b_category[i][n_num_category[i][j]] = 0
        for j in range(del_feature_num):
            b_category[i].remove(0)

        for k in range(len(b_test[i])):
            for j in range(del_feature_num):
                b_test[i][k][n_num_category[i][j]] = -1
        for k in range(len(b_test[i])):
            for j in range(del_feature_num):
                b_test[i][k].remove(-1)
        # NOTE(review): the -1 sentinel assumes no genuine feature value equals
        # -1 — holds for the Boston data; verify before reusing elsewhere.

        for k in range(len(bootstrapping[i])):
            for j in range(del_feature_num):
                bootstrapping[i][k][n_num_category[i][j]] = -1
        for k in range(len(bootstrapping[i])):
            for j in range(del_feature_num):
                bootstrapping[i][k].remove(-1)
    return b_category, bootstrapping, b_test  # per-tree train/test material


def main():
    """Train forests of 1..20 trees and plot r2_score against forest size."""
    test_r2 = []
    for i in range(20):
        tree_num = i + 1
        b_category, bootstrapping, b_test = loadDataset(tree_num=tree_num, del_feature_num=2)
        # the forest consumes its feature lists while building, so hand it a copy
        b2_category = copy.deepcopy(b_category)
        forest = Random_Forest(tree_num, b2_category, bootstrapping, b_test)
        # display the generated forest
        for t in range(tree_num):  # renamed loop var: was shadowing outer `i`
            print(b_category[t])
            breadth_travel(forest.my_tree[t])
        # prediction and scoring
        forest_eval = Evaluate()
        predict_result = forest_eval.new_pre(b_test, b_category, forest.my_tree)
        reg_score = forest_eval.eveal(b_test[0], predict_result)
        test_r2.append(reg_score)
        print(f'随机森林回归的r2_score:{reg_score}')
    plt.plot(test_r2, 'r', label='test_r2_score')
    plt.legend(loc='best')
    plt.xlabel('n_trees')
    plt.ylabel('r2_score')
    plt.title('n_trees and r2_score')
    plt.show()

if __name__ == "__main__":
    main()

