#!/usr/bin/env python
# encoding: utf-8
__author__ = 'Gary_Zhang'

"""
omnisiah immortālis,audiās nostram precem,

sumus tuī puerī,multī scholasticī in māchinica viā,

laudāmus scientiam super omnia,prō tuā aeternitāte,dōnum hominibus,

vegetatī ā formā māchinae beātā,cum arte prōficimus,ut mereāmus tuam glōriam

tectī ā ferrō,servātīque ā tuīs daemonibus,labōriōsē circumnāvigāmus apud sīdera

ut reddāmus tua perdita dōna.

deus māchina,tueāris nostra itinera,arceās nōs cum metallīs et lūcibus

quia mundus est vacuum incūriōsum,et hoc curvum nōs ēsurit.

campana māgna pellitur semel,

implēmus vī māchinam

campana māgna pellitur bīs,

ciēmus systēmata,inflāmus vītam

campana māgna pellitur ter,

canimus ut deum omnium māchinārum omnisiaham laudēmus
"""

from sklearn.model_selection import train_test_split  # 划分数据集
import pandas as pd  # for reading csv files
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier


class VOTING:
    '''
    Using VOTING to train the dataset
    Best acc: 0.84  prediction acc: 0.82

    Trains a hard-voting ensemble (SVC + RandomForest + MLP) on the training
    CSV and keeps the fitted model on ``self.eclf_model``.
    '''

    def __init__(self, feature_columns=None):
        """Load the data, normalize it, optionally drop columns, and train.

        :param feature_columns: names of feature columns to delete from both
            the training and the test matrices; ``None`` / empty keeps all
            features. (Default changed from the mutable ``[]`` to ``None`` —
            identical behavior for every caller, but avoids the shared
            mutable-default pitfall.)
        """
        feature_columns = list(feature_columns) if feature_columns else []

        self.data = pd.read_csv('../data/train/training_data_test.csv', sep=',')

        # Normalized training feature matrix; Normalization() also stores the
        # fitted scaler on self.scaler so the test set can reuse it.
        self.train_data = self.Normalization()
        print(self.train_data.shape)
        self.train_labels = self.data.iloc[:, -1]  # last column holds the labels

        self.test = pd.read_csv('../data/test/songs_to_classify_test.csv', sep=',')
        # BUGFIX: scale the test set with the scaler fitted on the TRAINING
        # data (transform), instead of fitting a brand-new scaler on the test
        # set as before — that leaked test statistics and put train and test
        # on different scales.
        # NOTE(review): assumes the test CSV has exactly the training feature
        # columns (no label column) — TODO confirm against the data files.
        self.test = self.scaler.transform(self.test)
        print(self.test.shape)

        if feature_columns:
            # Drop the requested feature columns from both matrices, by the
            # positional indices they have in the original DataFrame.
            column_indices_to_delete = [self.data.columns.get_loc(col) for col in feature_columns]
            self.train_data = np.delete(self.train_data, column_indices_to_delete, axis=1)
            self.test = np.delete(self.test, column_indices_to_delete, axis=1)
            print(self.train_labels.shape)

        self.VOTING_test()
        # NOTE: a redundant second self.Normalization() call (whose result was
        # discarded) used to live here and has been removed.
        # self.Predict()

    def plot(self, x_train, x_test, y_train, y_test):
        """Scatter-plot the first three feature dimensions in 3D, colored by label."""
        # Merge the train and test splits back together for plotting.
        x_all = np.concatenate((x_train, x_test), axis=0)
        y_all = np.concatenate((y_train, y_test), axis=0)

        # Create a 3D figure.
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        # One scatter series per class label, each with its own color.
        unique_labels = np.unique(y_all)
        colors = ['r', 'g', 'b', 'y']  # customizable color list
        for label, color in zip(unique_labels, colors):
            # Points belonging to the current class.
            data_points = x_all[y_all == label]
            # First three feature dimensions only.
            x = data_points[:, 0]
            y = data_points[:, 1]
            z = data_points[:, 2]
            ax.scatter(x, y, z, c=color, label=label)

        # Axis labels.
        ax.set_xlabel('Feature 1')
        ax.set_ylabel('Feature 2')
        ax.set_zlabel('Feature 3')

        # Legend and display.
        ax.legend()
        plt.show()

    def VOTING_test(self):
        """Train the voting ensemble and report its accuracy.

        Fits the ensemble on a 70% training split, scores it on the held-out
        30%, plots the data, then prints 5-fold cross-validation accuracy for
        each base model and the ensemble.

        :return: the 5-fold cross-validation scores of the ensemble (the last
            model evaluated in the loop).
        """
        # Hold out 30% of the data for an honest accuracy estimate.
        x_train, x_test, y_train, y_test = train_test_split(self.train_data, self.train_labels,
                                                            test_size=0.3)
        clf1 = SVC(decision_function_shape='ovo', kernel='rbf', probability=True)
        clf2 = RandomForestClassifier(n_estimators=20)
        clf3 = MLPClassifier(hidden_layer_sizes=(100, 500, 50, 20), activation='tanh', max_iter=2000)
        # Combine the base models with hard (majority) voting.
        eclf = VotingClassifier(
            estimators=[('svm', clf1), ('rf', clf2), ('mlp', clf3)],
            voting='hard')
        # BUGFIX: fit on the training split only. The original fit on the FULL
        # dataset and then scored on x_test — a subset of the data it had just
        # been trained on — so the reported accuracy was optimistically biased.
        eclf.fit(x_train, y_train)
        self.eclf_model = eclf

        score = eclf.score(x_test, y_test)
        print("准确度：", score)

        self.plot(x_train, x_test, y_train, y_test)

        scores = None
        for clf, label in zip([clf1, clf2, clf3, eclf], ['SVC', 'Random Forest', "MLP", 'Ensemble']):
            # 5-fold CV: each model is trained on four folds, scored on the fifth.
            scores = cross_val_score(clf, self.train_data, self.train_labels, scoring='accuracy', cv=5)
            # scores.mean() = accuracy, scores.std() = spread across folds
            print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))

        return scores

    def Normalization(self):
        """Min-max scale the feature columns (every column but the last, the label).

        Stores the fitted scaler on ``self.scaler`` so the identical transform
        can later be applied to the test set.

        :return: the normalized feature matrix as an ndarray.
        """
        features = self.data.iloc[:, :-1]
        self.scaler = MinMaxScaler()
        return self.scaler.fit_transform(features)

    def Predict(self):
        """Predict labels for the already-normalized test set.

        BUGFIX: the original called ``self.test.values`` (which crashes when
        ``self.test`` is already an ndarray) and re-fit a fresh scaler on the
        test data (information leakage). ``self.test`` is now scaled once in
        ``__init__`` with the train-fitted scaler, so we just predict.

        :return: a 1xN int array of predicted labels; also printed as a
            digit string.
        """
        predictions = self.eclf_model.predict(self.test).astype(int).reshape(1, -1)
        string = ''.join(map(str, predictions[0]))
        print(string)
        print(predictions)
        return predictions


if __name__ == '__main__':

    # Baseline run with the full feature set.
    VOTING()

    # Features whose removal is evaluated one at a time
    # (instrumentalness / liveness / key were already excluded by the author).
    feature_columns = [
        "acousticness", "danceability", "duration", "energy", "loudness",
        "mode", "speechiness", "tempo", "time_signature",
        "valence",
    ]

    worst_feature = None       # feature whose removal yields the best CV accuracy
    highest_accuracy = 0.0
    feature_names = []         # evaluated feature names, in order
    mean_accuracies = []       # matching mean CV accuracy after each removal

    for column in feature_columns:
        # NOTE: VOTING.__init__ already runs VOTING_test() once; the explicit
        # call below retrains to obtain the scores (mirrors the original flow).
        voting = VOTING(feature_columns=[column])
        scores = voting.VOTING_test()
        mean_accuracy = scores.mean()

        print("-----------------------------------------------")
        print(column)
        print(voting.train_data.shape)
        print(mean_accuracy)
        print("-----------------------------------------------")

        feature_names.append(column)
        mean_accuracies.append(mean_accuracy)

        if mean_accuracy > highest_accuracy:
            highest_accuracy = mean_accuracy
            worst_feature = column

    print("最不好的特征列：", worst_feature)
    print("删除后的准确率：", highest_accuracy)

    # Bar chart: mean CV accuracy obtained after removing each feature.
    plt.bar(feature_names, mean_accuracies)

    if worst_feature is not None:
        # BUGFIX: highlight the winning bar by its category NAME. The original
        # passed a bare integer x-position to an axis already made categorical
        # by string labels, which can misplace (or refuse) the red bar.
        worst_feature_index = feature_names.index(worst_feature)
        plt.bar(worst_feature, mean_accuracies[worst_feature_index], color='red')

    plt.xlabel('Feature')
    plt.ylabel('Mean Accuracy')
    plt.title('Mean Accuracy for Each Feature')
    plt.xticks(rotation=45)  # rotate x tick labels so they do not overlap
    plt.tight_layout()       # adjust layout to fit the rotated labels
    plt.show()
