import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from config import *
import joblib

import matplotlib

# Select the Tk GUI backend before any figure is created (matplotlib accepts
# backend names case-insensitively, so 'TKAgg' resolves to 'TkAgg').
matplotlib.use('TKAgg')

# Load the raw cardio dataset and drop exact duplicate rows.
df = pd.read_csv(data_path)
df = df.drop_duplicates()

print(len(df))

# Keep only physiologically plausible records (bounds inclusive, matching
# the original >= / <= filters).
# NOTE(review): the age bound 0-110 assumes age is in years — confirm the
# unit used in the source file.
plausible = (
    df['ap_hi'].between(50, 200)
    & df['ap_lo'].between(40, 150)
    & df['height'].between(140, 210)
    & df['weight'].between(35, 120)
    & df['age'].between(0, 110)
)
df = df[plausible]

# Compute BMI into the existing 'height' column so the new feature keeps
# height's column position (the iloc-based X/y split below relies on the
# target staying in the last column), then rename it and drop 'weight'.
df['height'] = df['weight'] / ((df['height'] / 100) ** 2)
df = df.rename(columns={'height': 'BMI'})
df.drop(['weight'], axis=1, inplace=True)

# Features are every column but the last; the last column is the target.
features = df.iloc[:, :-1]
target = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=1
)

# Global seaborn styling for all figures produced below.
sns.set_theme(style='darkgrid')
sns.set_context('notebook')

# Columns eligible for IQR outlier removal (see the commented-out routine
# below). Fixed casing: after the rename above the column is 'BMI', not
# 'bmi' — df['bmi'] would raise KeyError if that routine were re-enabled.
column = ['BMI', 'ap_hi', 'ap_lo']

# def remove_outliers_iqr(df,column):
#     Q1 = df[column].quantile(0.25)
#     Q3 = df[column].quantile(0.75)
#     IQR = Q3 - Q1
#
#     lower_bound = Q1 - 1.5 * IQR
#     upper_bound = Q3 + 1.5 * IQR
#
#     df = df[(df[column] >= lower_bound) & (df[column] <= upper_bound)]
#     return df
#
#
# for col in column:
#     df = remove_outliers_iqr(df, col)

# Min-max scale the features; fit on the training split only so no test
# information leaks into the transform.
scaler = MinMaxScaler()
# Pass index= explicitly: the original code rebuilt the DataFrames with a
# fresh RangeIndex, which silently breaks any later index-based alignment
# with y_train / y_test.
X_train_scaled = pd.DataFrame(
    scaler.fit_transform(X_train), columns=X_train.columns, index=X_train.index
)
X_test_scaled = pd.DataFrame(
    scaler.transform(X_test), columns=X_test.columns, index=X_test.index
)

# Persist the fitted scaler so inference code can apply the same transform.
scaler_filename = "scaler.save"
joblib.dump(scaler, scaler_filename)

# Fit a linear SVM on the scaled training data; perfect training accuracy
# would indicate the classes are linearly separable.
svc = SVC(kernel='linear')
svc.fit(X_train_scaled, y_train)

train_accuracy = accuracy_score(y_train, svc.predict(X_train_scaled))
print("训练集上的准确率:", train_accuracy)

verdict = "数据完全线性可分" if train_accuracy == 1 else "数据不完全线性可分"
print(verdict)

print("show some", X_train_scaled.head())
print(len(X_train_scaled))

# Per-column minima/maxima of the cleaned (pre-scaling) data.
# Renamed from `min`/`max` so the builtins are no longer shadowed.
col_min = df.min()
col_max = df.max()

print("\nMinimum values in each column:")
print(col_min)
print("\nMaximum values in each column:")
print(col_max)

# Save one boxplot per scaled feature under data_figure/.
for feature in X_train_scaled.columns:
    fig, ax = plt.subplots(figsize=(8, 4))
    sns.boxplot(x=X_train_scaled[feature], ax=ax)
    ax.set_title(f'Boxplot of {feature} After Normalization')
    ax.set_xlabel(feature)
    ax.grid(True)
    fig.savefig(f'data_figure/{feature}_normalized_boxplot.png')
    plt.close(fig)

# Correlation heatmap over the scaled training features plus the target.
train_scaled = X_train_scaled.copy()
# .values discards the index so assignment is positional, matching rows
# regardless of how the split shuffled them.
train_scaled['cardio'] = y_train.values

correlation_matrix = train_scaled.corr()

plt.figure(figsize=(12, 8))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', linewidths=.5)
plt.title('Correlation Matrix Heatmap')
plt.savefig(figure_path['HeatMap'])
# Close the figure after saving (the boxplot loop above does the same);
# the original leaked this figure, which matplotlib warns about at scale.
plt.close()

# Output locations for the preprocessed splits.
preprocessed_xtrain_path = 'data/cardio_xtrain_preprocessed.csv'
preprocessed_xtest_path = 'data/cardio_xtest_preprocessed.csv'
preprocessed_ytest_path = 'data/y_test.csv'
preprocessed_ytrain_path = 'data/y_train.csv'

# Write each split without its index, in the same order as before.
for frame, path in (
    (X_train_scaled, preprocessed_xtrain_path),
    (X_test_scaled, preprocessed_xtest_path),
    (y_test, preprocessed_ytest_path),
    (y_train, preprocessed_ytrain_path),
):
    frame.to_csv(path, index=False)

print("训练集特征大小:", X_train_scaled.shape)
print("训练集目标大小:", y_train.shape)
print("测试集特征大小:", X_test_scaled.shape)
print("测试集目标大小:", y_test.shape)
