import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder,OneHotEncoder,StandardScaler,OrdinalEncoder,LabelBinarizer
from sklearn.metrics import classification_report, accuracy_score, precision_score, recall_score, f1_score, \
    confusion_matrix
from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
# --- Dataset 1: per-structure protein metadata ---
df = pd.read_csv("pdb_data_no_dups.csv")
df.head()

# --- Dataset 2: per-chain sequence data ---
df1 = pd.read_csv("pdb_data_seq.csv")
df1.head()

# --- Left-join the sequence data onto the structure metadata by structureId ---
df2 = (
    df.set_index('structureId')
      .merge(df1.set_index('structureId'), on='structureId', how='left')
)
df2.head()

# Quick exploratory summary of the merged frame.
print(df2.describe())
print(" The shape of the protein dataset is: " + str(df2.shape))
print(df2.isnull().sum())

# Drop columns that are not relevant for the model.
df3 = df2.drop(columns=['publicationYear', 'chainId',
                        'macromoleculeType_x', 'macromoleculeType_y'])
df3.shape

# Discard every row that still contains a missing value (NaN),
# then drop a few more measurement columns not used as predictors.
df_new = df3.dropna()
df4 = df_new.drop(columns=['residueCount_x', 'residueCount_y', 'densityMatthews'])
df4['classification'] = df4['classification'].astype(str)

# Show the ten most frequent classification labels.
df_1 = df4['classification'].value_counts().head(10)
print(df_1)

# Keep only the rows whose classification label occurs more than 15,000 times,
# so the classifier trains on well-represented classes only.
counts = df4.classification.value_counts()
frequent_labels = counts[counts > 15000].index  # Index works directly with isin()
class_data = df4[df4.classification.isin(frequent_labels)]
class_data.dtypes

# Ordinal-encode the categorical columns on a copy of the filtered frame
# (working on a .copy() avoids SettingWithCopyWarning on the `class_data` slice).
# NOTE(review): `class_data_copy` is not consumed anywhere below — the model
# uses only the numeric columns of `class_data`; confirm before removing.
cat_transformer = OrdinalEncoder()
cat_features = ['crystallizationMethod', 'experimentalTechnique',
                'pdbxDetails', 'sequence', 'classification']
class_data_copy = class_data[cat_features].copy()
class_data_copy[:] = cat_transformer.fit_transform(class_data_copy)

from sklearn import preprocessing

# Scale the remaining numeric features to [0, 1] and build train/test splits.
min_max_scaler = preprocessing.MinMaxScaler()
# Keep only numeric feature columns: drop the target plus every categorical /
# unused measurement column. (Fix: 'pdbxDetails' was listed twice originally.)
X = class_data.drop(columns=['classification', 'experimentalTechnique',
                             'pdbxDetails', "resolution", "densityPercentSol",
                             "crystallizationMethod", "sequence"])
y = class_data.classification  # target: the well-represented class labels

X_minmax = min_max_scaler.fit_transform(X)
# Hold out 20% for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X_minmax, y, test_size=0.2, random_state=42
)


# # 定义要尝试的参数值范围
# param_grid = {'n_neighbors': [5]}  # 试验不同的邻居数量
# # 初始化GridSearchCV
# knn = KNeighborsClassifier()
# grid_search = GridSearchCV(knn, param_grid, cv=5)
# # 执行网格搜索来寻找最佳参数组合
# grid_search.fit(X_train, y_train)
# # 输出最佳参数组合和对应的评分
# print("Best Parameters:", grid_search.best_params_)
# print("Best Score:", grid_search.best_score_)
# # 使用最佳参数重新训练模型
# best_knn = grid_search.best_estimator_
# best_knn.fit(X_train, y_train)
#
# # 预测并评估模型性能
# y_pred = best_knn.predict(X_test)
# print(classification_report(y_test, y_pred, target_names=["Hydrolase","Oxidoreductase", "Ribosome", "Transferase"]))
#
# from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
# # 计算混淆矩阵
# conf_matrix = confusion_matrix(y_test, y_pred)
# # 计算精确率
# precision = precision_score(y_test, y_pred, average='micro')
# # 计算召回率
# recall = recall_score(y_test, y_pred, average='micro')
# # 计算F1分数
# f1 = f1_score(y_test, y_pred,average='micro' )
# # # 打印结果
# print("Confusion Matrix:\n", conf_matrix)
# print("Precision:", precision)
# print("Recall:", recall)
# print("F1 Score:", f1)

# 定义要尝试的参数值范围
neighbors = [3, 5, 7, 9]
precision_scores = []
recall_scores = []
f1_scores = []

for n in neighbors:
    param_grid = {'n_neighbors': [n]}
    knn = KNeighborsClassifier(n_neighbors=n)
    knn.fit(X_train, y_train)

    # 预测并计算评估指标
    y_pred = knn.predict(X_test)
    precision = precision_score(y_test, y_pred, average='micro')
    recall = recall_score(y_test, y_pred, average='micro')
    f1 = f1_score(y_test, y_pred, average='micro')

    # 记录评估指标
    precision_scores.append(precision)
    recall_scores.append(recall)
    f1_scores.append(f1)

    # 打印结果
    print(f"For n_neighbors={n}")
    print("Precision:", precision)
    print("Recall:", recall)
    print("F1 Score:", f1)
    print("--------------------")

# 绘制精确率，召回率和F1分数的变化图
plt.plot(neighbors, precision_scores, label='Precision')
plt.plot(neighbors, recall_scores, label='Recall')
plt.plot(neighbors, f1_scores, label='F1 Score')
plt.xlabel('n_neighbors')
plt.ylabel('Score')
plt.xticks(neighbors)
plt.legend()
plt.show()