import random

import numpy as np
import pandas as pd
import sklearn.preprocessing
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, \
    ConfusionMatrixDisplay
import matplotlib.pyplot as plt


# 1
def my_split(data, ratio):
    """Randomly split ``data`` row-wise into a train set and a test set.

    Args:
        data: 2-D array-like of samples (rows).
        ratio: fraction of rows assigned to the train set (0..1).

    Returns:
        (train_set, test_set) tuple of numpy arrays.

    Note: unlike an in-place ``np.random.shuffle``, this leaves the
    caller's array untouched by shuffling through a permutation index.
    """
    data = np.asarray(data)
    # Shuffle via fancy indexing so the input array is not mutated.
    shuffled = data[np.random.permutation(len(data))]
    split_at = int(len(data) * ratio)
    return shuffled[:split_at], shuffled[split_at:]


# 2
def my_metrics(y, y_predict):
    """Compute binary-classification metrics for 0/1 labels.

    Args:
        y: array-like of true labels (0 or 1).
        y_predict: array-like of predicted labels (0 or 1).

    Returns:
        (accuracy, precision, recall, f1) tuple of floats. A metric whose
        denominator would be zero is reported as 0.0 instead of NaN.
    """
    y = np.asarray(y)
    y_predict = np.asarray(y_predict)
    t_positive = np.sum((y == 1) & (y_predict == 1))
    t_negative = np.sum((y == 0) & (y_predict == 0))
    # FP = predicted positive but actually negative; FN = the reverse.
    # (The original code had these two swapped.)
    f_positive = np.sum((y == 0) & (y_predict == 1))
    f_negative = np.sum((y == 1) & (y_predict == 0))
    total = t_positive + t_negative + f_positive + f_negative
    accuracy_rate = (t_positive + t_negative) / total if total else 0.0
    precise_rate = t_positive / (t_positive + f_positive) if (t_positive + f_positive) else 0.0
    # Recall is TP / (TP + FN) — sensitivity on the positive class.
    recall_rate = t_positive / (t_positive + f_negative) if (t_positive + f_negative) else 0.0
    # Local renamed from ``f1_score`` to avoid shadowing sklearn's import.
    f1_value = (2 * precise_rate * recall_rate / (precise_rate + recall_rate)
                if (precise_rate + recall_rate) else 0.0)
    return accuracy_rate, precise_rate, recall_rate, f1_value


# 3
# 3
iris_set = datasets.load_iris()
# 3.1 Stack features and labels into one array so they shuffle together.
X = iris_set.data
Y = iris_set.target
Set = np.hstack((X, Y.reshape(-1, 1)))
# 3.2 70/30 train/test split.
set_train, set_test = my_split(Set, 0.7)
X_train, Y_train = set_train[:, :-1], set_train[:, -1]
X_test, Y_test = set_test[:, :-1], set_test[:, -1]
# 3.3 Fit the scaler on the TRAINING data only and reuse it for the test
# set. Fitting a second scaler on the test set (as before) leaks test
# statistics and puts train/test features on inconsistent scales.
scaler = StandardScaler().fit(X_train)
processed_X_train = scaler.transform(X_train)
processed_X_test = scaler.transform(X_test)

# 3.4
regress_model = linear_model.LogisticRegression()
regress_model.fit(processed_X_train, Y_train)
# 3.5
Y_predict = regress_model.predict(processed_X_test)
# 3.6
ac = accuracy_score(Y_test, Y_predict)
pre = precision_score(Y_test, Y_predict, average='macro')
rec = recall_score(Y_test, Y_predict, average='macro')
F1 = f1_score(Y_test, Y_predict, average='macro')
print(f"准确率：{ac}\n精确率：{pre}\n召回率：{rec}\nF1 score：{F1}")

# 3.7
# 3.7 Visualize the confusion matrix with species names on the axes.
cm = confusion_matrix(Y_test, Y_predict)
cm_display = ConfusionMatrixDisplay(cm, display_labels=iris_set.target_names)
cm_display.plot(cmap='coolwarm')
# SimHei so the Chinese title renders instead of missing-glyph boxes.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.title('混淆矩阵')
plt.show()

# 3.8
# 3.8 Baseline: the same classifier trained on RAW (non-standardized)
# features, to compare against the standardized pipeline above.
raw_model = linear_model.LogisticRegression()
raw_model.fit(X_train, Y_train)
raw_pred = raw_model.predict(X_test)
raw_acc = accuracy_score(Y_test, raw_pred)
raw_prec = precision_score(Y_test, raw_pred, average='macro')
raw_recall = recall_score(Y_test, raw_pred, average='macro')
raw_f1 = f1_score(Y_test, raw_pred, average='macro')
print(f"不使用标准化处理后：\n准确率：{raw_acc}\n精确率：{raw_prec}\n召回率：{raw_recall}\nF1 score：{raw_f1}")

# 4
#4.1
# 4
# 4.1 Synthetic data: y = 3x + 4 plus standard-normal noise.
x = np.random.random((1000, 1))
y = 3 * x + 4 + np.random.randn(1000, 1)
# 4.2 Design matrix: prepend a column of ones for the intercept term.
var_x = np.c_[np.ones((1000, 1)), x]
# 4.3 Closed-form least squares (normal equation): w = (XᵀX)⁻¹ Xᵀ y.
w = np.linalg.inv(var_x.T @ var_x) @ var_x.T @ y
bias, weight = w[0], w[1]
print(f"偏置项：{bias}\n特征权重：{weight}")

#4.4
# 4.4 Solve the same least-squares problem by batch gradient descent.
learning_rate = 0.01
iterations = 1000
m = len(x)
W = np.random.randn(2, 1)  # random initial parameters (bias, weight)
for _ in range(iterations):
    # Gradient of the MSE loss w.r.t. W: (2/m) Xᵀ (XW - y).
    gradient = (2 / m) * var_x.T.dot(var_x.dot(W) - y)
    W = W - learning_rate * gradient
print(f"梯度下降算法求出的w：{W}")

#5
# 5 Compare gradient-descent convergence under several learning rates.
# (The original opened a plt.figure here that was never drawn on — the
# actual figure is created just before plotting below — so it is removed.)
learning_rates = [0.01, 0.1, 0.15, 0.2]
losses = []  # one per-iteration MSE history for each learning rate
for lr in learning_rates:
    W_1 = np.random.randn(2, 1)  # fresh random start per learning rate
    cost_history = []
    for iteration in range(iterations):
        gradients = 2 / m * var_x.T.dot(var_x.dot(W_1) - y)
        W_1 = W_1 - lr * gradients
        # Track MSE after each update to visualize convergence speed.
        cost_history.append(np.mean((var_x.dot(W_1) - y) ** 2))
    losses.append(cost_history)

# Plot one loss curve per learning rate on a shared figure.
plt.figure(figsize=(10, 6))
for i, lr in enumerate(learning_rates):
    plt.plot(range(iterations), losses[i], label=f'Learning Rate: {lr}')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title('不同学习率对线性回归模型收敛速度的影响')
# The curves carry labels but the original never called legend(),
# so the labels were silently dropped.
plt.legend()
plt.show()