# day10: simple logistic regression classification demo
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt

def logistic_regression_demo(n_samples=100, test_size=0.2, random_state=42,
                             show_plot=True):
    """Train and evaluate a logistic regression classifier on synthetic 2-D data.

    Two Gaussian clusters are generated (class 0 around (2, 2), class 1 around
    (4, 4)), split into train/test sets, and fit with ``LogisticRegression``.
    Accuracy, coefficients, and the confusion matrix are printed; optionally
    the decision boundary is plotted.

    Args:
        n_samples: Total number of samples, split evenly between the classes.
        test_size: Fraction of the data held out for testing.
        random_state: Seed for data generation and the train/test split.
        show_plot: When True (default), display the decision-boundary figure.

    Returns:
        dict with keys ``"model"`` (the fitted estimator) and ``"accuracy"``
        (test-set accuracy as a float).
    """
    # Use a local RandomState instead of np.random.seed so the demo does not
    # mutate the caller's global RNG state.
    rng = np.random.RandomState(random_state)
    half = n_samples // 2
    # Two well-separated blobs -> an easy, (almost) linearly separable task.
    X_class0 = rng.normal(loc=[2, 2], scale=0.5, size=(half, 2))
    X_class1 = rng.normal(loc=[4, 4], scale=0.5, size=(half, 2))
    X = np.vstack([X_class0, X_class1])
    y = np.array([0] * half + [1] * half)

    # Hold out part of the data for an unbiased accuracy estimate.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state)

    # Fit the model on the training split only.
    model = LogisticRegression()
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)

    # Report predictions, learned parameters, and the confusion matrix.
    print("测试集特征数据:\n", X_test)
    print("\n预测的类别标签:\n", y_pred)
    print("\n实际的类别标签:\n", y_test)
    print(f"\n模型准确率: {accuracy:.2f}")
    print("\n模型系数:", model.coef_)
    print("模型截距:", model.intercept_)
    print("\n混淆矩阵:\n", confusion_matrix(y_test, y_pred))

    if show_plot:
        _plot_decision_boundary(model, X, y)

    return {"model": model, "accuracy": accuracy}


def _plot_decision_boundary(model, X, y):
    """Scatter the samples colored by label and shade the decision regions."""
    plt.figure(figsize=(8, 6))
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis', s=60, edgecolor='k', label='实际数据点')
    # Evaluate the classifier on a dense grid covering the data (with a
    # 1-unit margin) so contourf can shade each predicted-class region.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200), np.linspace(y_min, y_max, 200))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.2, cmap='viridis')
    plt.title('逻辑回归分类结果与决策边界')
    plt.xlabel('特征1')
    plt.ylabel('特征2')
    plt.legend()
    plt.show()

if __name__ == "__main__":
    logistic_regression_demo(n_samples=100, test_size=0.2, random_state=42) 


    


