from sklearn import svm
from sklearn.metrics import recall_score,accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import os
import numpy as np
# import smote_variants as sv
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from imblearn.over_sampling import SVMSMOTE,RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
# --- Data loading -----------------------------------------------------------
fold_path = 'data_5fold_new/3_fold'
data_path = os.path.join(fold_path, 'feat')
label_path = os.path.join(fold_path, 'label')

# Features pre-extracted by the SKCNN model; labels are integer class ids
# (0 = absent, 1 = soft, 2 = loud).
X_train = np.load(os.path.join(data_path, 'train_SKCNN_feature.npy'), allow_pickle=True)
X_test = np.load(os.path.join(data_path, 'vali_SKCNN_feature.npy'), allow_pickle=True)
y_train = np.load(os.path.join(label_path, 'train_label.npy'), allow_pickle=True)
# NOTE(review): features come from 'vali_*' but labels from 'test_label.npy' —
# confirm both files describe the same split, otherwise the metrics below are
# computed against mismatched ground truth.
y_test = np.load(os.path.join(label_path, 'test_label.npy'), allow_pickle=True)

# --- Class balancing ---------------------------------------------------------
# Alternatives tried: RandomUnderSampler(), SVMSMOTE().
sampler = RandomOverSampler()
X_train1, y_train1 = sampler.fit_resample(X_train, y_train)

# Report class distribution after resampling. Bug fix: these counts describe
# the resampled TRAINING set, not the validation set as previously printed.
class_count = np.bincount(np.asarray(y_train1).astype(int), minlength=3)
print("train_set:", 'absent:', class_count[0], 'soft:', class_count[1], 'loud:', class_count[2])

# --- Model -------------------------------------------------------------------
# Alternatives tried: svm.SVC(), LogisticRegression().
model = XGBClassifier(n_estimators=40, random_state=2021, eval_metric=['logloss'])
model.fit(X_train1, y_train1)
y_pred = model.predict(X_test)

# --- Evaluation --------------------------------------------------------------
# Overall accuracy.
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: {:.4f}".format(accuracy))

# Confusion matrix (rows = true class, columns = predicted class).
confusion = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:\n", confusion)

# Per-class precision, recall, F1.
report = classification_report(y_test, y_pred)
print("Classification Report:\n", report)