#!/usr/bin/env python
# encoding: utf-8
__author__ = 'Gary_Zhang'

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt


class KNN:
    '''
    K-nearest-neighbours classifier over the song dataset.

    Loads the training CSV, min-max normalizes the features, optionally
    deletes a set of feature columns from both train and test matrices,
    then fits and evaluates a KNeighborsClassifier.
    Accuracy: 0.828
    '''

    def __init__(self, feature_columns=None, k=10):
        """Load the data, normalize, optionally prune columns, fit the model.

        :param feature_columns: names of feature columns to delete before
            training; None or empty keeps every column.  (The original
            mutable default ``[]`` was replaced by ``None`` to avoid the
            shared-default-argument pitfall; behavior is unchanged.)
        :param k: number of neighbours for KNeighborsClassifier.
        """
        self.data = pd.read_csv('../data/train/training_data_knn.csv', sep=',')  # key mode time_signature
        self.train_data = self.Normalization()
        print(self.train_data.shape)

        self.train_labels = self.data.iloc[:, -1]  # last column holds the labels
        self.test = pd.read_csv('../data/test/songs_to_classify.csv', sep=',')

        if feature_columns:
            # Map the column names to positions in the original DataFrame,
            # then drop those columns from both the train and test matrices.
            column_indices_to_delete = [self.data.columns.get_loc(col) for col in feature_columns]
            self.train_data = np.delete(self.train_data, column_indices_to_delete, axis=1)
            # NOTE(review): the test set is scaled with its own MinMaxScaler
            # rather than the one fitted on the training data — kept as-is,
            # but this is a leakage/consistency concern worth confirming.
            scaler = MinMaxScaler()
            self.test = scaler.fit_transform(self.test)
            print(self.test.shape)
            self.test = np.delete(self.test, column_indices_to_delete, axis=1)

        self.k = k
        # Fit and evaluate immediately on construction.  (A second, discarded
        # Normalization() call that followed here was removed: its return
        # value was never used.)
        self.KNN_test()

    def KNN_test(self):
        """Fit a distance-weighted k-NN, report hold-out and 10-fold CV accuracy.

        Side effects: prints the scores and stores the model (refitted on the
        FULL training set) in ``self.knn_model``.
        """
        x_train, x_test, y_train, y_test = train_test_split(self.train_data, self.train_labels,
                                                            test_size=0.3)
        knn = KNeighborsClassifier(n_neighbors=self.k, weights='distance')
        knn.fit(x_train, y_train)
        score = knn.score(x_test, y_test)
        print("Accuracy:", score)

        # Refit on everything before cross-validating / exposing the model.
        knn.fit(self.train_data, self.train_labels)
        self.knn_model = knn
        scores = cross_val_score(knn, self.train_data, self.train_labels, cv=10,
                                 scoring='accuracy')
        print('Cross-validation Accuracy:', scores)
        print('Mean Accuracy: ', scores.mean())

    def Normalization(self):
        """Min-max scale every feature column (all but the last) of self.data.

        :return: numpy array of the scaled feature matrix.
        """
        features = self.data.iloc[:, :-1]
        scaler = MinMaxScaler()
        normalized_features = scaler.fit_transform(features)
        return normalized_features

    def Predict(self):
        """Predict integer labels for the test set; returns a 1xN array.

        Bug fix: when feature_columns were supplied, __init__ already turned
        ``self.test`` into a scaled numpy array, so the original
        ``self.test.values`` raised AttributeError (and re-scaling it would
        distort the features).  Only normalize when it is still a DataFrame.
        """
        if isinstance(self.test, pd.DataFrame):
            scaler = MinMaxScaler()
            normalized_X_test = scaler.fit_transform(self.test.values)
        else:
            normalized_X_test = self.test

        # astype(int) then a single reshape replaces the original redundant
        # reshape(-1, 1) ... reshape(1, -1) round-trip; the result is identical.
        predictions = self.knn_model.predict(X=normalized_X_test).astype(int).reshape(1, -1)
        print(predictions)
        return predictions


if __name__ == '__main__':
    KNN(k=10)

    # Sweep k = 1..19 and record the 5-fold cross-validated mean accuracy
    # for each value of k.
    accuracies = []
    for i in range(1, 20):
        knn_k = KNN(k=i)
        scores = cross_val_score(knn_k.knn_model, knn_k.train_data, knn_k.train_labels, cv=5, scoring='accuracy')
        mean_accuracy = scores.mean()
        print("++++++++++++++++++++")
        print(i)
        print(mean_accuracy)
        print("--------------------")
        accuracies.append(mean_accuracy)

    # Plotting the line graph
    plt.plot(range(1, 20), accuracies, marker='o')
    plt.xlabel('K')
    plt.ylabel('Mean Accuracy')
    plt.title('KNN Mean Accuracy for Different K')
    plt.show()

    # Feature-ablation study: delete one feature at a time and see which
    # deletion yields the highest CV accuracy — that feature is the "worst".
    # (The redundant list([...]) wrapper around the literal was removed.)
    feature_columns = ["acousticness", "danceability", "duration", "energy", "instrumentalness", "liveness",
                       "loudness", "speechiness", "tempo", "valence"]  # Specify the feature columns to delete

    worst_feature = None
    highest_accuracy = 0.0
    feature_names = []  # Store the feature names
    mean_accuracies = []  # Store the mean accuracies

    for column in feature_columns:
        knn = KNN(feature_columns=[column], k=10)
        scores = cross_val_score(knn.knn_model, knn.train_data, knn.train_labels, cv=5, scoring='accuracy')
        mean_accuracy = scores.mean()

        print("-----------------------------------------------")
        print(column)
        print(mean_accuracy)
        print("-----------------------------------------------")

        feature_names.append(column)
        mean_accuracies.append(mean_accuracy)

        if mean_accuracy > highest_accuracy:
            highest_accuracy = mean_accuracy
            worst_feature = column

    print("Worst feature column:", worst_feature)
    print("Accuracy after deletion:", highest_accuracy)

    # Bar chart of per-feature ablation accuracy.
    plt.bar(feature_names, mean_accuracies)

    # Highlight the worst feature in red.  Use the category NAME rather than
    # a numeric index so the highlight is anchored to the categorical axis
    # robustly instead of relying on index/position coincidence.
    worst_feature_index = feature_names.index(worst_feature)
    plt.bar(worst_feature, mean_accuracies[worst_feature_index], color='red')

    plt.xlabel('Feature')
    plt.ylabel('Mean Accuracy')
    plt.title('Mean Accuracy for Each Feature')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()