import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns

class ManualLogisticRegression:
    def __init__(self, learning_rate=0.01, n_iter=1000, verbose=True):
        """
        Logistic regression implemented from scratch with batch gradient descent.

        Parameters:
        learning_rate: step size used for each gradient-descent update
        n_iter: number of gradient-descent iterations
        verbose: whether to print the training loss every 100 iterations
        """
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.verbose = verbose
        self.weights = None        # learned weight vector, set by fit()
        self.bias = None           # learned intercept, set by fit()
        self.loss_history = []     # per-iteration training loss; reset on each fit()

    def sigmoid(self, z):
        """Numerically stable sigmoid activation, 1 / (1 + exp(-z))."""
        # Clip the logits so np.exp cannot overflow for large-magnitude inputs.
        z = np.clip(z, -500, 500)
        return 1 / (1 + np.exp(-z))

    def initialize_parameters(self, n_features):
        """Initialize weights with small random values and the bias with zero."""
        self.weights = np.random.randn(n_features) * 0.01
        self.bias = 0.0

    def compute_loss(self, y_true, y_pred):
        """Return the mean binary cross-entropy loss."""
        # Clip predictions away from exactly 0 and 1 to avoid log(0).
        epsilon = 1e-15
        y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
        return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

    def fit(self, X, y):
        """
        Train the model with batch gradient descent.

        Parameters:
        X: feature matrix, shape (m samples, n features)
        y: binary labels in {0, 1}, shape (m samples,)

        Returns:
        self, to allow sklearn-style chaining.
        """
        # Coerce to float arrays so list inputs and integer labels work safely.
        X = np.asarray(X, dtype=float)
        y = np.asarray(y, dtype=float)
        m, n = X.shape

        # Initialize parameters.
        self.initialize_parameters(n)
        # Bug fix: reset the history so repeated fit() calls do not
        # accumulate losses from previous training runs.
        self.loss_history = []

        # Gradient descent.
        for i in range(self.n_iter):
            # Forward pass.
            linear_output = np.dot(X, self.weights) + self.bias
            y_pred = self.sigmoid(linear_output)

            # Track the loss for later plotting/inspection.
            loss = self.compute_loss(y, y_pred)
            self.loss_history.append(loss)

            # Backward pass - gradients of the cross-entropy loss.
            dw = (1 / m) * np.dot(X.T, (y_pred - y))
            db = (1 / m) * np.sum(y_pred - y)

            # Parameter update.
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db

            # Periodic progress report.
            if self.verbose and i % 100 == 0:
                print(f"Iteration {i}, Loss: {loss:.4f}")
        return self

    def predict_proba(self, X):
        """Return P(y=1 | x) for each row of X."""
        linear_output = np.dot(X, self.weights) + self.bias
        return self.sigmoid(linear_output)

    def predict(self, X, threshold=0.5):
        """Return hard class labels (0/1) using the given probability threshold."""
        probabilities = self.predict_proba(X)
        return (probabilities >= threshold).astype(int)

    def get_decision_boundary(self, X, threshold=0.5):
        """
        Return (x1, x2) points of the linear decision boundary for 2-D data.

        Solves w1*x1 + w2*x2 + b = 0  =>  x2 = (-w1*x1 - b) / w2.
        Returns (None, None) when the model is not 2-dimensional.
        Note: if w2 == 0 (vertical boundary) the division yields inf values.
        """
        if len(self.weights) == 2:  # only meaningful for 2-D features
            x1_min, x1_max = X[:, 0].min(), X[:, 0].max()
            x1_values = np.linspace(x1_min, x1_max, 100)
            x2_values = (-self.weights[0] * x1_values - self.bias) / self.weights[1]
            return x1_values, x2_values
        return None, None

# 3. Create and visualize the dataset
print("生成分类数据集...")
X, y = make_classification(
    n_samples=1000, 
    n_features=2, 
    n_redundant=0, 
    n_informative=2,
    n_clusters_per_class=1,
    random_state=42
)

# Standardize features to zero mean / unit variance (helps gradient descent converge)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split into training and test sets (80/20, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)

print(f"训练集大小: {X_train.shape}")
print(f"测试集大小: {X_test.shape}")

# 4. Train the manually implemented logistic regression model
print("\n训练手动实现的逻辑回归模型...")
manual_lr = ManualLogisticRegression(learning_rate=0.1, n_iter=1000, verbose=True)
manual_lr.fit(X_train, y_train)

# 5. Predict and evaluate on the held-out test set
y_pred = manual_lr.predict(X_test)
y_pred_proba = manual_lr.predict_proba(X_test)

accuracy = accuracy_score(y_test, y_pred)
print(f"\n手动实现逻辑回归准确率: {accuracy:.4f}")

print("\n分类报告:")
print(classification_report(y_test, y_pred))

# 6. Visualize the results
# NOTE(review): the Chinese axis labels/titles below need a CJK-capable
# matplotlib font configured, otherwise they may render as boxes — confirm.
plt.figure(figsize=(15, 5))

# Subplot 1: training-loss curve
plt.subplot(1, 3, 1)
plt.plot(manual_lr.loss_history)
plt.xlabel('迭代次数')
plt.ylabel('损失')
plt.title('训练损失下降曲线')
plt.grid(True)

# Subplot 2: decision boundary
plt.subplot(1, 3, 2)
# Build a dense grid over feature space to draw the decision boundary
x_min, x_max = X_scaled[:, 0].min() - 0.5, X_scaled[:, 0].max() + 0.5
y_min, y_max = X_scaled[:, 1].min() - 0.5, X_scaled[:, 1].max() + 0.5
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                     np.linspace(y_min, y_max, 100))

# Predict the probability at every grid point
Z = manual_lr.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

# Filled contours of the predicted probability, overlaid with the data points
plt.contourf(xx, yy, Z, alpha=0.4, cmap=plt.cm.RdBu)
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=y, cmap=plt.cm.RdBu, edgecolors='black')
plt.xlabel('特征1 (标准化)')
plt.ylabel('特征2 (标准化)')
plt.title('决策边界')

# Subplot 3: confusion matrix heatmap
plt.subplot(1, 3, 3)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.title('混淆矩阵')
plt.ylabel('真实标签')
plt.xlabel('预测标签')

plt.tight_layout()
plt.show()

# 7. Compare against sklearn's LogisticRegression as a reference implementation
from sklearn.linear_model import LogisticRegression

print("\n与Sklearn逻辑回归比较...")
sklearn_lr = LogisticRegression()
sklearn_lr.fit(X_train, y_train)
sklearn_pred = sklearn_lr.predict(X_test)
sklearn_accuracy = accuracy_score(y_test, sklearn_pred)

print(f"Sklearn逻辑回归准确率: {sklearn_accuracy:.4f}")
print(f"手动实现准确率: {accuracy:.4f}")
print(f"准确率差异: {abs(accuracy - sklearn_accuracy):.4f}")

# 8. Compare predicted probabilities between the two implementations
print("\n前10个样本的概率预测比较:")
print("手动实现概率:", y_pred_proba[:10])
# sklearn's predict_proba returns (n, 2); column 1 is P(y=1)
print("Sklearn概率:", sklearn_lr.predict_proba(X_test)[:, 1][:10])