import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, precision_score, recall_score, \
    f1_score

# Load the PDB sequence table and the per-structure characteristics table.
df_seq = pd.read_csv('pdb_data_seq.csv')
df_char = pd.read_csv('pdb_data_no_dups.csv')
pd.set_option('display.max_columns', None)  # show every column when printing frames

print('Datasets have been loaded...')
# Quick sanity check: first rows, then dtypes/row counts for both tables.
for frame in (df_seq, df_char):
    print(frame.head())
for frame in (df_seq, df_char):
    print(frame.info())

# Restrict both tables to protein records only.
is_protein_char = df_char.macromoleculeType == 'Protein'
is_protein_seq = df_seq.macromoleculeType == 'Protein'
protein_char = df_char[is_protein_char]
protein_seq = df_seq[is_protein_seq]
print(protein_seq.head())

# Keep just the columns needed for modelling: the join key plus the
# label (classification) on one side and the feature (sequence) on the other.
protein_char = protein_char[['structureId', 'classification']]
protein_seq = protein_seq[['structureId', 'sequence']]

# Index both frames by structureId and join sequences to their labels.
model_df = (
    protein_seq.set_index('structureId')
    .join(protein_char.set_index('structureId'))
)
print(model_df.head(2))
print(model_df.info())
print(model_df.isnull().sum())

# Discard rows missing either the sequence or the classification.
model_df = model_df.dropna()
print(model_df.shape[0])

# How many examples each classification label has.
counts = model_df['classification'].value_counts()
print(counts)

# Only labels with more than 1000 examples are kept for modelling.
frequent = counts[counts > 1000]
print(frequent)

types = np.asarray(frequent.index)

data = model_df[model_df.classification.isin(types)]
print(data.head(2))

# Features are the raw sequences; targets are the class labels.
X = data['sequence']
y = data['classification']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1
)

# Bag-of-character-4-grams representation of the amino-acid sequences;
# char_wb builds n-grams inside word boundaries only.
vect = CountVectorizer(analyzer='char_wb', ngram_range=(4, 4))
# Fit the vocabulary on the training split, then reuse it for the test split.
X_train_df = vect.fit_transform(X_train)
X_test_df = vect.transform(X_test)

# Estimator and hyper-parameter-search utilities for the experiments below.
# (CountVectorizer, TfidfVectorizer, train_test_split and the metric helpers
# were already imported at the top of the file; the duplicates were removed.)
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV

# NOTE: an earlier GridSearchCV-based alpha search was parked here inside
# bare triple-quoted string literals (dead code disguised as a "comment");
# it has been removed. The explicit sweep below covers the same alpha grid
# and additionally records per-alpha metrics for plotting.

# Laplace-smoothing strengths to compare for MultinomialNB.
alphas = [0.1, 0.5, 1.0, 2.0, 5.0]
# One score list per metric, filled in sweep order (parallel to `alphas`).
accuracies = []
precisions = []
recalls = []
f1_scores = []

for alpha in alphas:
    # Train a multinomial Naive Bayes with the current smoothing strength
    # and predict on the held-out split.
    nb_model = MultinomialNB(alpha=alpha)
    nb_model.fit(X_train_df, y_train)
    y_pred = nb_model.predict(X_test_df)

    # Weighted averaging accounts for the class imbalance across labels.
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')

    # Record each metric in its per-alpha history list.
    for history, score in ((accuracies, accuracy), (precisions, precision),
                           (recalls, recall), (f1_scores, f1)):
        history.append(score)

    print(f"Alpha = {alpha}:")
    print(f"Accuracy: {accuracy}")
    print(f"Precision: {precision}")
    print(f"Recall: {recall}")
    print(f"F1-score: {f1}")
    print()

# Plot how each evaluation metric responds to the smoothing strength.
plt.figure()
for values, label in ((accuracies, 'Accuracy'), (precisions, 'Precision'),
                      (recalls, 'Recall'), (f1_scores, 'F1-score')):
    plt.plot(alphas, values, label=label)
plt.xlabel('Alpha')
plt.ylabel('Score')
plt.legend()
plt.title('Model Evaluation Metrics')
plt.show()

# (pyplot, seaborn, pandas, confusion_matrix and classification_report are
# all imported at the top of the file; the duplicate imports that stood
# here have been removed.)

# Confusion matrix of the most recent predictions (y_pred still holds the
# results of the last alpha in the sweep).
#
# Fix: the original passed y[:10] (the first 10 *sample* labels — arbitrary
# and possibly duplicated) as the axis tick labels. The ticks must be the
# class names in the same sorted order confusion_matrix uses for its
# rows/columns, so we pass np.unique(y_test) explicitly.
class_labels = np.unique(y_test)  # sorted; matches confusion_matrix row order
cm = confusion_matrix(y_test, y_pred, labels=class_labels)

# Visualize only the first 10 classes to keep the heatmap readable.
plt.figure(figsize=(10, 8))
sns.heatmap(cm[:10, :10], annot=True, fmt="d", cmap="Blues",
            xticklabels=class_labels[:10], yticklabels=class_labels[:10])

plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()


# Render the per-class precision/recall/F1 table as a heatmap.
report = classification_report(y_test, y_pred, output_dict=True)
report_df = pd.DataFrame(report).transpose()

plt.figure(figsize=(10, 8))
# Drop the final summary row so the color scale is driven by per-class scores.
heat_data = report_df.iloc[:-1, :].astype(float)
sns.heatmap(heat_data, annot=True, cmap="Blues", fmt=".2f", cbar=False)
plt.title("Classification Report")
plt.show()


# The raw CSVs were already loaded into df_seq / df_char at the top of the
# file and neither frame is modified above, so the redundant re-reads that
# stood here have been removed.