import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score # 精确率
from sklearn.metrics import recall_score # 召回率
from sklearn.metrics import f1_score # F1

# 1. Ground-truth labels: 6 malignant ("恶性") cases followed by 4 benign ("良性") ones.
y_true = ["恶性"] * 6 + ["良性"] * 4

# 2. Model A's predictions: catches only 3 of the 6 malignant cases,
#    but classifies all 4 benign cases correctly.
a_y = ["恶性"] * 3 + ["良性"] * 7

# 3. Model B's predictions: catches all 6 malignant cases,
#    but misclassifies 3 of the 4 benign cases as malignant.
b_y = ["恶性"] * 6 + ["良性"] * 1 + ["恶性"] * 3

# 4. Build and display the confusion matrix for each model.
# Fixing labels=["恶性", "良性"] pins the malignant (positive) class to row/column 0,
# so both matrices share the same orientation regardless of label order in the data.
_classes = ["恶性", "良性"]
a_cm = confusion_matrix(y_true, a_y, labels=_classes)
b_cm = confusion_matrix(y_true, b_y, labels=_classes)

# Wrap each raw matrix in a DataFrame so the printout carries readable axis labels.
_axis_labels = ["正例(恶性)", "反例(良性)"]
a_df = pd.DataFrame(a_cm, index=_axis_labels, columns=_axis_labels)
print(a_df)

b_df = pd.DataFrame(b_cm, index=_axis_labels, columns=_axis_labels)
print(b_df)

# 5. Evaluate both models, treating "恶性" (malignant) as the positive class.
#    precision = TP / (TP + FP)
#    recall    = TP / (TP + FN)
#    f1        = 2 * precision * recall / (precision + recall)
_metrics = (
    ("精确率", precision_score),  # precision
    ("召回率", recall_score),     # recall
    ("f1分数", f1_score),         # F1 score
)
for _name, _metric in _metrics:
    print(f"A模型的{_name}：{_metric(y_true, a_y, pos_label='恶性')}")
    print(f"B模型的{_name}：{_metric(y_true, b_y, pos_label='恶性')}")
    print("-" * 66)

