import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score

class BPNeuralNetwork:
    """A minimal two-layer backpropagation network.

    Supports binary classification (sigmoid output + cross-entropy loss)
    and regression (linear output + MSE loss), trained with full-batch
    gradient descent.

    Parameters
    ----------
    input_size : int
        Number of input features.
    hidden_size : int
        Number of hidden units.
    output_size : int
        Number of output units.
    lr : float
        Gradient-descent learning rate.
    task : str
        Either 'classification' or 'regression'.
    seed : int or None
        Optional seed for reproducible weight initialization. ``None``
        keeps the original behavior of drawing from the global NumPy RNG.

    Raises
    ------
    ValueError
        If ``task`` is not 'classification' or 'regression'.
    """

    def __init__(self, input_size, hidden_size, output_size, lr=0.01,
                 task='classification', seed=None):
        if task not in ('classification', 'regression'):
            raise ValueError(f"unknown task: {task!r}")
        self.lr = lr
        self.task = task

        # Small random weights; a dedicated RandomState keeps initialization
        # reproducible without disturbing the global RNG state.
        rng = np.random.RandomState(seed) if seed is not None else np.random
        self.W1 = rng.randn(input_size, hidden_size) * 0.1
        self.b1 = np.zeros((1, hidden_size))
        self.W2 = rng.randn(hidden_size, output_size) * 0.1
        self.b2 = np.zeros((1, output_size))

        # Per-epoch training-loss history, filled by train().
        self.losses = []

    def sigmoid(self, x):
        """Numerically stable logistic function (inputs clipped to ±500)."""
        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))

    def forward(self, X):
        """Forward pass; caches intermediate activations for backward().

        Returns the network output: sigmoid probabilities for
        classification, raw linear values for regression.
        """
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self.sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        self.a2 = self.sigmoid(self.z2) if self.task == 'classification' else self.z2
        return self.a2

    def backward(self, X, y, output):
        """Apply one gradient-descent update from a cached forward pass.

        ``dz2 = output - y`` is the exact output-layer gradient for
        sigmoid + cross-entropy; for the linear/MSE case it differs from
        the true MSE gradient only by a constant factor of 2, which is
        absorbed into the learning rate.
        """
        m = X.shape[0]

        # Output-layer gradients
        dz2 = output - y
        dW2 = np.dot(self.a1.T, dz2) / m
        db2 = np.sum(dz2, axis=0, keepdims=True) / m

        # Hidden-layer gradients: chain rule through sigmoid' = a1 * (1 - a1)
        dz1 = np.dot(dz2, self.W2.T) * self.a1 * (1 - self.a1)
        dW1 = np.dot(X.T, dz1) / m
        db1 = np.sum(dz1, axis=0, keepdims=True) / m

        # Gradient-descent weight update
        self.W2 -= self.lr * dW2
        self.b2 -= self.lr * db2
        self.W1 -= self.lr * dW1
        self.b1 -= self.lr * db1

    def train(self, X, y, epochs=1000):
        """Run full-batch gradient descent for ``epochs`` iterations.

        Appends each epoch's loss to ``self.losses`` and prints progress
        every 200 epochs.
        """
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output)

            # Track the loss matching the task; 1e-8 guards against log(0).
            if self.task == 'classification':
                loss = -np.mean(y * np.log(output + 1e-8)
                                + (1 - y) * np.log(1 - output + 1e-8))
            else:
                loss = np.mean((output - y) ** 2)

            self.losses.append(loss)

            if epoch % 200 == 0:
                print(f'Epoch {epoch}, Loss: {loss:.4f}')

    def predict(self, X):
        """Return hard 0/1 labels (threshold 0.5) for classification,
        or raw outputs for regression."""
        output = self.forward(X)
        return (output > 0.5).astype(int) if self.task == 'classification' else output

# Synthetic customer-churn dataset
def generate_churn_data(n=1000):
    """Return (features, labels) for a synthetic churn problem.

    Features are [tenure months, monthly charges, total charges], shape
    (n, 3); labels are 0/1 churn indicators, shape (n, 1). Results are
    deterministic because the global RNG is re-seeded with 42.
    """
    np.random.seed(42)
    months = np.random.randint(1, 73, n)
    fee = np.random.uniform(20, 120, n)
    lifetime_spend = months * fee + np.random.normal(0, 200, n)

    # Churn logic: short tenure plus high monthly fee raises the risk.
    risk = 0.8 - 0.01 * months + 0.005 * fee + np.random.normal(0, 0.1, n)
    labels = (np.random.rand(n) < np.clip(risk, 0, 1)).astype(int)

    features = np.column_stack([months, fee, lifetime_spend])
    return features, labels.reshape(-1, 1)

# Synthetic student-grade dataset
def generate_student_data(n=1000):
    """Return (features, final_grades) for a synthetic grading problem.

    Features are [study hours, previous score, attendance rate], shape
    (n, 3); the target is the final grade clipped to [0, 100], shape
    (n, 1). Deterministic: the global RNG is re-seeded with 42.
    """
    np.random.seed(42)
    study_hours = np.random.uniform(50, 200, n)
    midterm = np.random.uniform(40, 100, n)
    attendance_rate = np.random.uniform(60, 100, n)

    # Grade logic: weighted mix of effort, prior score and attendance.
    raw = 0.2 * study_hours + 0.6 * midterm + 0.2 * attendance_rate
    grade = np.clip(raw + np.random.normal(0, 5, n), 0, 100)

    features = np.column_stack([study_hours, midterm, attendance_rate])
    return features, grade.reshape(-1, 1)

# Task 1: customer churn prediction (binary classification)
print("=== 客户流失预测 ===")
X_churn, y_churn = generate_churn_data()

# Standardize features to zero mean / unit variance before training
scaler_churn = StandardScaler()
X_churn_scaled = scaler_churn.fit_transform(X_churn)

# Hold out 20% of the data for evaluation
X_train, X_test, y_train, y_test = train_test_split(X_churn_scaled, y_churn, test_size=0.2, random_state=42)

# Train: 3 input features, 8 hidden units, 1 sigmoid output
model_churn = BPNeuralNetwork(3, 8, 1, lr=0.1, task='classification')
model_churn.train(X_train, y_train, epochs=1000)

# Evaluate hard-label accuracy on the held-out set
y_pred = model_churn.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'准确率: {accuracy:.4f}')

# Single-customer example: [tenure months, monthly charges, total charges];
# forward() is called directly to get the raw churn probability rather
# than the thresholded label from predict().
sample = np.array([[24, 65.5, 1572.0]])
sample_scaled = scaler_churn.transform(sample)
pred_prob = model_churn.forward(sample_scaled)[0][0]
print(f'客户流失概率: {pred_prob:.2%}')

# Task 2: student final-grade prediction (regression)
print("\n=== 学生成绩预测 ===")
X_student, y_student = generate_student_data()

# Standardize both features and target; the target scaler is kept so
# predictions can be mapped back to the original 0-100 grade scale.
scaler_student_X = StandardScaler()
scaler_student_y = StandardScaler()
X_student_scaled = scaler_student_X.fit_transform(X_student)
y_student_scaled = scaler_student_y.fit_transform(y_student)

# Hold out 20% for evaluation (reuses the X_train/X_test names from task 1)
X_train, X_test, y_train, y_test = train_test_split(X_student_scaled, y_student_scaled, test_size=0.2, random_state=42)

# Train: 3 input features, 10 hidden units, 1 linear output
model_student = BPNeuralNetwork(3, 10, 1, lr=0.01, task='regression')
model_student.train(X_train, y_train, epochs=1000)

# Evaluate in original grade units: inverse-transform predictions and targets
y_pred_scaled = model_student.predict(X_test)
y_pred = scaler_student_y.inverse_transform(y_pred_scaled)
y_test_original = scaler_student_y.inverse_transform(y_test)

mse = mean_squared_error(y_test_original, y_pred)
r2 = r2_score(y_test_original, y_pred)
print(f'MSE: {mse:.4f}, R²: {r2:.4f}')

# Single-student example: [study hours, previous score, attendance rate]
sample = np.array([[120, 85, 95]])
sample_scaled = scaler_student_X.transform(sample)
pred_scaled = model_student.predict(sample_scaled)
pred_score = scaler_student_y.inverse_transform(pred_scaled)[0][0]
print(f'预测期末成绩: {pred_score:.2f}分')

# Plot both training-loss curves side by side.
# NOTE(review): the titles contain Chinese characters; matplotlib needs a
# CJK-capable font configured (e.g. plt.rcParams['font.sans-serif']) to
# render them without glyph warnings — confirm the runtime environment.
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(model_churn.losses)
plt.title('客户流失预测 - 训练损失')
plt.xlabel('Epoch')
plt.ylabel('Loss')

plt.subplot(1, 2, 2)
plt.plot(model_student.losses)
plt.title('学生成绩预测 - 训练损失')
plt.xlabel('Epoch')
plt.ylabel('Loss')

plt.tight_layout()
plt.show()