#%%
import numpy as np
import pandas as pd

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, roc_auc_score

# %% [markdown]
### 1.导入数据（10分）

#%%
# Load the iris dataset into a DataFrame, attach the class label and a
# per-class plotting color.
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
df["target"] = data.target

def _target_color(t):
    # 0 -> red, 1 -> green, anything else -> blue (iris has classes 0/1/2).
    if t == 0:
        return "r"
    if t == 1:
        return "g"
    return "b"

df["color"] = df["target"].apply(_target_color)
df.info()
#%%
# Pairwise feature scatter plots colored by class, KDE on the diagonal.
pd.plotting.scatter_matrix(
    df.iloc[:, :-2],
    c=df.color,
    figsize=(12, 12),
    marker="o",
    diagonal="kde",
    alpha=0.5,
    range_padding=0.5,
)

# %% [markdown]
### 2.切分数据集（10分）

#%%
# Hold out 20% of the rows for testing; fixed seed keeps the split reproducible.
features = df.iloc[:, :-2]
labels = df["target"]
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=1
)
# %%[markdown]
### 3.使用标准化包，对训练集来学习，从而对训练集和测试集来做标准化（20分）
#%%
# Learn the standardization parameters from the training split ONLY,
# then apply them to the training data.
ss = StandardScaler().fit(X_train)
t_train = pd.DataFrame(ss.transform(X_train), columns=data.feature_names)

# Visual sanity check: same scatter matrix as before, now on standardized
# features; row colors are looked up via the original DataFrame index.
pd.plotting.scatter_matrix(
    t_train,
    c=df.loc[X_train.index, "color"],
    figsize=(12, 12),
    marker="o",
    diagonal="kde",
    alpha=0.5,
    range_padding=0.5,
)
#%%

# Apply the training-set scaler to the test split (no re-fitting on test data).
t_test = pd.DataFrame(ss.transform(X_test), columns=data.feature_names)

pd.plotting.scatter_matrix(
    t_test,
    c=df.loc[X_test.index, "color"],
    figsize=(12, 12),
    marker="o",
    diagonal="kde",
    alpha=0.5,
    range_padding=0.5,
)


#%%[markdown]
# 4.在确定l2范式的情况下，使用网格搜索判断solver, C的最优组合（20分)
#%%
# Grid-search the (solver, C) combination under an L2 penalty.
# Fixes vs. the original grid:
#   * "Ibfgs" (capital I) was a typo for "lbfgs" — sklearn rejects unknown
#     solver names, so that candidate could never be evaluated.
#   * "liblinear" is dropped: it does not support
#     multi_class="multinomial", so every fit with it would fail.
LR = LogisticRegression(
    penalty="l2", max_iter=10000, multi_class="multinomial", n_jobs=-1
)
perma = {
    "C": np.linspace(0.5, 1, 19).tolist(),
    "solver": ["lbfgs", "sag", "newton-cg"],
}
GSCV = GridSearchCV(LR, perma, cv=5, n_jobs=-1)
GSCV.fit(t_train, y_train)
# Best cross-validated accuracy and the hyper-parameters that achieved it.
GSCV.best_score_, GSCV.best_params_

# %%[markdown]
""" ### 5.将最优的结果重新用来实例化模型，查看训练集和测试集下的分数（20分）
 (注意多分类需要增加参数  average='micro')
 """
#%%
# Re-instantiate the model with the best hyper-parameters found by the grid
# search and fit it on the standardized training data.
best = GSCV.best_params_
LR = LogisticRegression(
    penalty="l2",
    max_iter=10000,
    multi_class="multinomial",
    C=best["C"],
    solver=best["solver"],
    n_jobs=-1,
)
model = LR.fit(t_train, y_train)
# Mean accuracy on the training and the test split.
model.score(t_train, y_train), model.score(t_test, y_test)
#%%
# Per-class precision / recall / F1 on both splits.
for split_name, (X_split, y_split) in (
    ("train", (t_train, y_train)),
    ("test", (t_test, y_test)),
):
    print(split_name)
    print(classification_report(y_split, model.predict(X_split)))


# %%[markdown]

""" 
### 6.计算精准率（20分）
|  | T | F |
| -- | -- | -- |
| T | TP | FP |
| F | FN | TN |

准确率 = $ \frac{tp + tn}{tp+tn+fp+fn} $

精确率 = $ \frac{tp}{tp+fp} $

召回率 = $ \frac{tp}{tp+fn} $

"""
# %%
def cals(x, y, estimator=None):
    """Compute per-class precision and recall from a confusion matrix.

    Parameters
    ----------
    x : array-like
        Feature matrix passed to the estimator's ``predict``.
    y : pandas Series (named "target") or equivalent
        True labels; wrapped into a DataFrame whose label column must be
        named ``target`` (as it is everywhere in this script).
    estimator : object with a ``predict`` method, optional
        Defaults to the module-level ``model`` fitted above, preserving the
        original two-argument call.

    Returns
    -------
    (dict, DataFrame)
        ``{label: {"精确率": precision, "召回率": recall}}`` and the raw
        crosstab (rows = true label, columns = predicted label).
    """
    # Only touch the global `model` when no estimator is supplied.
    clf = model if estimator is None else estimator
    res_ = pd.DataFrame(y)
    res_["predict"] = clf.predict(x)
    # Keep the true-label index (the original reset_index(drop=True) turned
    # row labels positional, which only matches class labels when classes
    # are exactly 0..n-1).
    crs = pd.crosstab(res_.target, res_.predict)

    res = {}
    for k in crs.columns:
        # precision: correct predictions of k / everything predicted as k
        precision = crs.loc[k, k] / crs.loc[:, k].sum()
        # recall: correct predictions of k / all true instances of k
        recall = crs.loc[k, k] / crs.loc[k, :].sum()
        res[k] = {"精确率": precision, "召回率": recall}

    return res, crs


# Evaluate per-class precision/recall on the standardized test split.
cals(t_test, y_test)
# %%
