import matplotlib.pyplot as plt
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # for reading csv files
from sklearn.feature_selection import SelectKBest, mutual_info_classif
import numpy as np
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier


def Normalization(data):
    """Scale every feature column of *data* into the [0, 1] range.

    The final column is assumed to be the label and is excluded from
    scaling (the caller reads it separately).

    Args:
        data: DataFrame whose columns are features, except the last
            column which holds the target label.

    Returns:
        ndarray of min-max scaled feature values (one row per sample).
    """
    feature_frame = data.iloc[:, :-1]
    # Min-max scaling maps each column's minimum to 0 and maximum to 1.
    return MinMaxScaler().fit_transform(feature_frame)


# Load the training set; the last CSV column is treated as the label below.
data = pd.read_csv('../data/train/training_data.csv', sep=',')

train_data = Normalization(data)  # min-max scaled feature matrix (dataset)
train_labels = data.iloc[:, -1]  # target labels (last column)

# --- Filter method: variance threshold ---------------------------------
# Build a VarianceThreshold selector with the variance cutoff.
selector = VarianceThreshold(threshold=0.1)

# Fit on the normalized features and keep only columns whose variance
# exceeds the threshold.
X_selected = selector.fit_transform(train_data)

# Visualize how much each original feature varies.
feature_variances = selector.variances_
plt.bar(np.arange(len(feature_variances)), feature_variances)
plt.xlabel('Feature Index')
plt.ylabel('Variance')
plt.title('Variance of Features')
plt.show()

# --- Filter method: mutual information ---------------------------------
# SelectKBest scores every feature against the label and keeps the top k.
selector = SelectKBest(score_func=mutual_info_classif, k=5)

# Score the normalized features against the target labels and reduce
# the matrix to the k best columns.
X_selected = selector.fit_transform(train_data, train_labels)

# Per-feature mutual-information score with the target.
mi_scores = selector.scores_

# Visualize the mutual information of each feature.
plt.bar(np.arange(len(mi_scores)), mi_scores)
plt.xlabel('Feature Index')
plt.ylabel('Mutual Information')
plt.title('Mutual Information between Features and Target')
plt.show()

# --- Wrapper method: recursive feature elimination ----------------------
# Base estimator for RFE; with n_features_to_select=None, RFE keeps half
# of the features by default.
estimator = RandomForestClassifier(n_estimators=20)
selector = RFE(estimator=estimator, n_features_to_select=None)

# Recursively drop the weakest features, refitting the forest each round.
# NOTE(review): RandomForestClassifier has no random_state here, so the
# resulting ranking is not reproducible across runs — confirm intended.
X_selected = selector.fit_transform(train_data, train_labels)

# Rank 1 marks a selected feature; features eliminated earlier get
# larger ranks.
feature_ranking = selector.ranking_

# Visualize each feature's elimination rank.
plt.bar(np.arange(len(feature_ranking)), feature_ranking)
plt.xlabel('Feature Index')
plt.ylabel('Ranking')
plt.title('Feature Ranking')
plt.show()