# Imports, grouped stdlib-first per PEP 8.
# Fixes: removed the broken `import np as np` (crashes unless the unrelated
# "np" alias package happens to be installed; numpy is imported as np below),
# and the duplicate `accuracy_score` import.
import warnings

import joblib
import numpy as np
import pandas as pd
from sklearn import tree
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.impute import SimpleImputer
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Per-class precision/recall are undefined when a class never appears in the
# predictions; silence that warning to keep console output readable.
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)

# ---- Data loading & cleaning --------------------------------------------
# The dermatology dataset marks missing values (only in 'age') with '?'.
df = pd.read_csv("D:/dashujuwajue/pythonProjectdazuoye/dermatology_database_1.csv")

# Turn the '?' placeholders into real NaN so they can be imputed.
# (The original also called df.fillna(df.mean()) here, which raises a
# TypeError on modern pandas because 'age' is still object-dtype, and then
# replaced '?' a second time — both redundant steps are removed.)
df.replace('?', np.nan, inplace=True)

# 'age' was parsed as strings because of the '?' markers; cast to float and
# fill the missing entries with the column mean.
si = SimpleImputer(missing_values=np.nan, strategy='mean')
df['age'] = si.fit_transform(df[['age']].astype(float))

# ---- Train/test split for the classifier --------------------------------
X = df.drop('class', axis=1)
y = df['class']
# 299 samples in total; hold out 20 of them as the TEST set (the original
# comment claiming "20 for training" was wrong).  random_state added so the
# run is reproducible, consistent with the seeded splits below.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20 / 299,
                                                    random_state=42)

# Separate train / validation / test partitions of the full frame,
# persisted to CSV so they can be inspected or reloaded later.
train_df, test_df1 = train_test_split(df, test_size=0.2, random_state=42)
train_df, val_df1 = train_test_split(train_df, test_size=0.2, random_state=42)
train_df.to_csv('train.csv', index=False)
val_df1.to_csv('val.csv', index=False)
test_df1.to_csv('test.csv', index=False)

# Reload and print to confirm the files round-trip correctly.
train_df = pd.read_csv('train.csv')
val_df = pd.read_csv('val.csv')
print(train_df)
print(val_df)

# ---- Min-max normalization scratch section -------------------------------
# NOTE(review): .loc is label-inclusive, so row 100 appears in BOTH train_x
# and judge_data — confirm whether .iloc[:100] / .iloc[100:] was intended.
train_x = df.loc[:100, :] # the first rows, used as the "training" sample
judge_data = df.loc[100:, :] # remaining rows, to be normalized and judged
print(judge_data)
# Column-wise maxima and minima.
x_max = train_x.max(axis=0)
x_min = train_x.min(axis=0) # axis=1 would compute per-row max/min instead
print(x_max, "\n", x_min)
# Convert everything to numeric first; otherwise the subtraction below can
# fail on string-typed columns (string minus string is not defined).
train_x = train_x.apply(pd.to_numeric)
x_min = x_min.apply(pd.to_numeric)
judge_data = judge_data.apply(pd.to_numeric)
x_max = x_max.apply(pd.to_numeric) # type conversion done
standardization = (train_x - x_min) / (x_max - x_min)
# Column 2 is treated as an inverse indicator, so it gets a flipped scaling.
# NOTE(review): x_min[1] / x_max[1] index a string-labelled Series by
# position — deprecated in pandas; x_min.iloc[1] would be explicit.
standardization.iloc[:, 1] = (x_min[1] - train_x.iloc[:, 1]) / (x_max[1] - x_min[1]) # training-set normalization complete
y_0 = np.hstack([np.zeros(5), np.ones(5)]) # labels: five 0s, five 1s — NOTE(review): appears unused below
print("训练集数据标准化已完成", "\n", standardization)
# Normalize the to-be-judged data with the training min/max.
judge_data = (judge_data - x_min) / (x_max - x_min)
# Same inverse treatment for the second column.
judge_data.iloc[:, 1] = (x_min[1] - judge_data.iloc[:, 1]) / (x_max[1] - x_min[1])

# NOTE(review): this writes the ORIGINAL cleaned df, not the normalized
# frames — the later "test" stage therefore reloads the training data.
df.to_csv('new_file.csv', index=False)

# ---- Decision-tree model -------------------------------------------------
# min_weight_fraction_leaf=0.2 strongly regularises the tree: every leaf
# must carry at least 20% of the total sample weight.
model = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=10,
                               min_samples_split=2, min_samples_leaf=1,
                               min_weight_fraction_leaf=0.2)

model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("决策树准确率：")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
# Bug fix: '{:2f}' means "width 2, default 6-decimal float"; the intended
# two-decimal format is '{:.2f}'.
print('模型得分: {:.2f}'.format(model.score(X_test, y_test)))
y_ = np.array(y_test)
print("决策树预测结果:", model.predict(X_test))
print('--------------------------------------------------------------------------------------')
print('真实结果: ', y_)

# NOTE(review): the dermatology classes appear to be 1..6, so counting
# predictions equal to 1 / 0 is not a real positive/negative split — the
# "negative" count will always be 0.  Confirm the intent.
pos_samples = np.sum(y_pred == 1)
neg_samples = np.sum(y_pred == 0)
print("Positive samples:", pos_samples)
print("Negative samples:", neg_samples)


"""输出测试日志"""

# 完成模型训练，请在这里添加训练代码
joblib.dump(model, 'model.pkl')

import os
file_name = 'model.pkl'
# 文件路径
file_path = os.path.join(os.getcwd(), file_name)
# 判断文件是否存在
if os.path.isfile(file_path):
    print(f"{file_name} 文件存在")
    print(file_path)
else:
    print(f"{file_name} 文件不存在")

# ---- Test logging --------------------------------------------------------
# NOTE(review): these re-imports duplicate the top-of-file imports; harmless
# but redundant.
import numpy as np
import pandas as pd
import logging
from sklearn.metrics import classification_report
# Route DEBUG-and-above records to test.log.
logging.basicConfig(filename='test.log', level=logging.DEBUG)
# NOTE(review): new_file.csv was written from the FULL cleaned dataframe, so
# this "test set" overlaps the training data and the accuracy below is
# optimistic — confirm whether test.csv was intended instead.
test_data = pd.read_csv("D:/dashujuwajue/pythonProjectdazuoye/new_file.csv")
# Split features from the ground-truth labels.
X_test = test_data.drop('class',axis=1)
y_test = test_data['class']
# Reload the model persisted by the training stage.
model = joblib.load('model.pkl')
# Predict on the reloaded data.
y_pred = model.predict(X_test)
# Emit the classification report both to the log file and to stdout.
report = classification_report(y_test, y_pred)
logging.info("Classification report: \n\n %s" % report)
print("Classification report: \n\n %s" % report)
# Fraction of exact label matches == accuracy.
accuracy = np.mean(y_pred == y_test)
logging.info("Test accuracy: %.2f%%" % (accuracy * 100))
print("Test accuracy: %.2f%%" % (accuracy * 100))


"""预测"""

#输出病患的信息
indices = [1,0,1,0,2,0,1,1,0,3,0,2,2,0,2,0,1,1,0,0,2,0,1,0,0,2,2,0,1,0,0,1,0,35]
l = []
for index in indices:
    l.append(index)
empty_indices = {"erythema": l[0], "scaling": l[1], "definite_borders": l[2], "itching": l[3], "koebner_phenomenon": l[4],
                 "polygonal_papules": l[5], "follicular_papules": l[6], "oral_mucosal_involvement": l[7],
                 "knee_and_elbow_involvement": l[8], "scalp_involvement": l[9], "family_history": l[10], "melanin_incontinence": l[11],
                 "eosinophils_infiltrate": l[12], "PNL_infiltrate": l[13], "fibrosis_papillary_dermis": l[14], "exocytosis": l[15],
                 "acanthosis": l[16], "hyperkeratosis": l[17], "parakeratosis": l[18], "clubbing_rete_ridges": l[19], "elongation_rete_ridges": l[20],
                 "thinning_suprapapillary_epidermis": l[21], "spongiform_pustule": l[22], "munro_microabcess": l[23],"focal_hypergranulosis": l[24],
                 "disappearance_granular_layer": l[25], "vacuolisation_damage_basal_layer": l[26], "spongiosis": l[27], "saw_tooth_appearance_retes": l[28],
                 "follicular_horn_plug": l[29],"perifollicular_parakeratosis": l[30],"inflammatory_mononuclear_infiltrate": l[31],
                 "band_like_infiltrate": l[32], "age": l[33]}
data = pd.DataFrame(empty_indices, index=[0])
data = data.set_index('erythema')
data.to_csv("new.csv")
df_1 = pd.read_csv("new.csv")
s = df_1.iloc[0]
print("此病患的数据为：",s)

#输出预测结果
predict_result = model.predict(df_1)
print("预测此病患皮肤病严重程度为：",str(predict_result))


"""评估图"""

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, roc_curve
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

# 计算混淆矩阵
cm = confusion_matrix(y_test, y_pred)

# 生成混淆矩阵图表
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
plt.xticks(range(3))
plt.yticks(range(3))
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.title("Confusion Matrix")
plt.show()

# 将多分类问题转化为二分类问题
y_test_bin = label_binarize(y_test, classes=[0, 1, 2])
y_score_bin = model.predict_proba(X_test)

# 计算ROC曲线和AUC值
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(3):
    fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_score_bin[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# 绘制ROC曲线
plt.figure()
colors = ['red', 'blue', 'green']
for i, color in zip(range(3), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=2,
             label='ROC curve of class {0} (area = {1:0.2f})'
             ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()

# 计算模型误差和R方分数
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print("MSE: ", mse)
print("R2 score: ", r2)

# 生成散点图
plt.scatter(y_test, y_pred, alpha=0.5)
plt.xlabel("True Values")
plt.ylabel("Predictions")
plt.title("Scatter plot")
plt.show()