#!/usr/bin/env python
# encoding: utf-8
__author__ = 'Gary_Zhang'

import pandas as pd  # for reading csv files
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from sklearn import tree  # 导入决策树
from sklearn.model_selection import train_test_split  # 划分数据集
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import time

# Toggle for the optional matplotlib comparison plots in SVM_test.
# NOTE(review): name is a typo of "DISPLAY" but is referenced by the class
# below, so it is kept as-is to avoid breaking those reads.
# DISPALY = True
DISPALY = False


class RandomForests:
    '''
    Train a RandomForestClassifier on the song training set, compare it
    against a single decision tree, and predict labels for the test set.
    Best acc: 0.85

    Parameters
    ----------
    feature_columns : list[str] | None
        Names of feature columns to delete from both the training and the
        test data before fitting. None / empty list keeps all columns.
    '''

    def __init__(self, feature_columns=None):
        # Avoid a mutable default argument; None means "keep every column".
        feature_columns = list(feature_columns) if feature_columns else []

        self.data = pd.read_csv('../data/train/training_data_rf.csv', sep=',')
        self.train_data = self.Normalization()  # min-max scaled feature matrix
        print(self.train_data.shape)

        # Last column of the training CSV holds the class labels.
        self.train_labels = self.data.iloc[:, -1]

        if not feature_columns:
            self.test = pd.read_csv('../data/test/songs_to_classify_rf.csv', sep=',')
        else:
            # Remove the requested columns from train and test alike so the
            # model and the prediction input stay aligned.
            column_indices_to_delete = [self.data.columns.get_loc(col) for col in feature_columns]
            self.train_data = np.delete(self.train_data, column_indices_to_delete, axis=1)
            self.test = pd.read_csv('../data/test/songs_to_classify.csv', sep=',')
            scaler = MinMaxScaler()
            self.test = scaler.fit_transform(self.test)  # becomes an ndarray here
            print(self.test.shape)
            self.test = np.delete(self.test, column_indices_to_delete, axis=1)

        self.SVM_test()
        # FIX: the original called self.Normalization() again here and threw
        # the result away — removed as dead work (__init__ already stored it).
        self.Predict()

    def SVM_test(self):
        '''Cross-validate forest vs. single tree, fit the forest, print scores.

        Side effect: stores the fitted forest in ``self.rfc_model`` for
        ``Predict``. (Name kept for backward compatibility; no SVM involved.)
        '''
        # 70/30 hold-out split for the final score comparison.
        x_train, x_test, y_train, y_test = train_test_split(self.train_data, self.train_labels,
                                                            test_size=0.3)
        rfc = RandomForestClassifier(n_estimators=20)
        rfc_s = cross_val_score(rfc, self.train_data, self.train_labels, cv=10)  # 10-fold CV
        print("Random Forests Score: ", rfc_s)
        print("Mean: ", rfc_s.mean())

        clf = tree.DecisionTreeClassifier()
        clf_s = cross_val_score(clf, self.train_data, self.train_labels, cv=10)

        if DISPALY:
            # Per-fold CV accuracy: forest vs. single tree.
            plt.plot(range(1, 11), rfc_s, label="RandomForest")
            plt.plot(range(1, 11), clf_s, label="Decision Tree")
            plt.legend()
            plt.show()

        # fit() returns the estimator itself, so self.rfc_model ends up
        # pointing at the fitted forest even though it is stored first.
        self.rfc_model = rfc
        clf = clf.fit(x_train, y_train)
        rfc = rfc.fit(x_train, y_train)
        score_c = clf.score(x_test, y_test)
        score_r = rfc.score(x_test, y_test)
        print("Single Tree:{}".format(score_c)
              , "Random Forest:{}".format(score_r)
              )

        if DISPALY:
            # Sweep n_estimators to visualize accuracy and fit time trade-off.
            scores = []
            times = []
            for i in range(1, 50):
                sweep_rfc = RandomForestClassifier(n_estimators=i)
                time1 = time.time()
                sweep_rfc = sweep_rfc.fit(x_train, y_train)
                score = sweep_rfc.score(x_test, y_test)
                time2 = time.time() - time1
                scores.append(score)
                times.append(time2)
            plt.plot(range(1, 50), scores, label="RandomForest")
            plt.legend()
            plt.show()

            plt.plot(range(1, 50), times, label="RandomForest time")
            plt.legend()
            plt.show()

    def Normalization(self):
        '''Return the training features (all columns but the label) scaled
        to [0, 1] with min-max normalization, as an ndarray.'''
        features = self.data.iloc[:, :-1]
        scaler = MinMaxScaler()
        normalized_features = scaler.fit_transform(features)
        return normalized_features

    def Predict(self):
        '''Predict integer labels for ``self.test`` with the trained forest.

        Returns a (1, n_samples) int ndarray of predictions.
        '''
        # FIX: self.test is a DataFrame in the default path but a plain
        # ndarray when feature columns were deleted; only the former has
        # .values. np.asarray accepts both without copying an ndarray.
        X_test = np.asarray(self.test)
        # NOTE(review): the scaler is re-fitted on the test data itself,
        # which leaks test statistics; ideally the training scaler should be
        # reused — kept as-is to preserve existing predictions.
        scaler = MinMaxScaler()
        normalized_X_test = scaler.fit_transform(X_test)

        # FIX: dropped the redundant .reshape(-1, 1) that was immediately
        # undone by .reshape(1, -1); output shape is unchanged.
        predictions = self.rfc_model.predict(X=normalized_X_test).astype(int).reshape(1, -1)
        print("Prediction: ", predictions)
        return predictions


if __name__ == '__main__':

    # Baseline run with every feature kept.
    RandomForests()

    # Candidate columns to delete one at a time
    # (liveness, instrumentalness, mode, key are already excluded).
    feature_columns = [
        "acousticness", "danceability", "duration", "energy", "loudness",
        "speechiness", "tempo", "time_signature", "valence",
    ]

    worst_feature = None       # column whose removal helped accuracy the most
    highest_accuracy = 0.0     # best mean CV accuracy seen so far
    feature_names = []         # column removed in each run
    mean_accuracies = []       # mean CV accuracy after removing that column

    for column in feature_columns:
        # Retrain with this single column removed, then 5-fold cross-validate.
        rfc = RandomForests(feature_columns=[column])
        scores = cross_val_score(rfc.rfc_model, rfc.train_data, rfc.train_labels, cv=5, scoring='accuracy')
        mean_accuracy = scores.mean()

        print("-----------------------------------------------")
        print(column)
        print(rfc.train_data.shape)
        print(mean_accuracy)
        print("-----------------------------------------------")

        feature_names.append(column)
        mean_accuracies.append(mean_accuracy)

        if mean_accuracy > highest_accuracy:
            highest_accuracy = mean_accuracy
            worst_feature = column

    print("最不好的特征列：", worst_feature)
    print("删除后的准确率：", highest_accuracy)

    # Bar chart of mean accuracy per removed feature.
    plt.bar(feature_names, mean_accuracies)

    if worst_feature is not None:  # guard: .index() raises on None
        worst_feature_index = feature_names.index(worst_feature)
        # FIX: the x-axis is categorical (string labels above), so the red
        # highlight bar must be addressed by its label, not by an integer
        # index, or it is drawn at the wrong position.
        plt.bar(worst_feature, mean_accuracies[worst_feature_index], color='red')

    plt.xlabel('Feature')
    plt.ylabel('Mean Accuracy')
    plt.title('Mean Accuracy for Each Feature')
    plt.xticks(rotation=45)   # rotate tick labels to avoid overlap
    plt.tight_layout()
    plt.show()
