import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

import numpy as np
from ml_lib.core import Tensor
from ml_lib.nn.module import Module
from ml_lib.nn.layers import Linear, ReLU, Sigmoid, Tanh
from ml_lib.nn.loss import MSELoss
from ml_lib.optim import SGD
import time
import matplotlib.pyplot as plt

class SimpleNet(Module):
    """Two-layer feed-forward regressor: Linear(2->6) -> Sigmoid -> Linear(6->1)."""

    def __init__(self):
        super().__init__()
        # NOTE(review): the attribute is named `relu` but holds a Sigmoid.
        # Kept as-is because renaming would change the registered module names.
        self.fc1 = Linear(2, 6)   # 2 input features -> 6 hidden units
        self.relu = Sigmoid()     # hidden-layer activation
        self.fc2 = Linear(6, 1)   # 6 hidden units -> 1 output

    def forward(self, x):
        """Forward pass: fc2(sigmoid(fc1(x)))."""
        return self.fc2(self.relu(self.fc1(x)))

def generate_data(n_samples=100):
    """
    Generate a simple regression dataset: y = sin(x1).

    The second input feature x2 is sampled but does not influence the
    target, so the network has to learn to ignore it.
    (The original docstring claimed y = sin(x1) * cos(x2), which did not
    match the code; the docstring was fixed to match the implementation.)

    Args:
        n_samples: total number of samples; 80% train / 20% test split.

    Returns:
        ((X_train, y_train), (X_test, y_test), (X_mean, X_std, y_mean, y_std))
        where the splits are Tensors and the normalization statistics are
        plain numpy values, kept for later de-normalization.
    """
    X = np.random.uniform(-np.pi, np.pi, (n_samples, 2))
    y = np.sin(X[:, 0])
    y = y.reshape(-1, 1)  # target as a column vector

    # Standardize features and target to zero mean / unit variance.
    X_mean, X_std = X.mean(axis=0), X.std(axis=0)
    y_mean, y_std = y.mean(), y.std()

    X = (X - X_mean) / X_std
    y = (y - y_mean) / y_std

    # 80/20 split; inputs are i.i.d. uniform draws, so no shuffle is needed.
    n_train = int(0.8 * n_samples)
    X_train, y_train = X[:n_train], y[:n_train]
    X_test, y_test = X[n_train:], y[n_train:]

    return (Tensor(X_train), Tensor(y_train)), (Tensor(X_test), Tensor(y_test)), (X_mean, X_std, y_mean, y_std)

def train(model, criterion, optimizer, train_data, num_epochs=100, batch_size=16):
    """
    Train `model` with shuffled mini-batch gradient descent.

    Args:
        model: callable module mapping a Tensor batch to predictions.
        criterion: loss callable; its result exposes .data and .backward().
        optimizer: exposes zero_grad() and step().
        train_data: (X_train, y_train) pair of Tensors.
        num_epochs: number of full passes over the training set.
        batch_size: mini-batch size; the last batch of an epoch may be smaller.

    Returns:
        List of per-epoch average batch losses (floats).
    """
    X_train, y_train = train_data
    n_samples = X_train.shape[0]
    # Exact number of mini-batches per epoch (ceiling division). The original
    # divided by the float n_samples / batch_size, which misreports the
    # average whenever batch_size does not divide n_samples.
    n_batches = (n_samples + batch_size - 1) // batch_size

    # Average loss recorded for each epoch.
    epoch_losses = []

    for epoch in range(num_epochs):
        # Reshuffle sample order every epoch.
        indices = np.random.permutation(n_samples)
        total_loss = 0.0

        for i in range(0, n_samples, batch_size):
            batch_indices = indices[i:i + batch_size]

            X_batch = Tensor(X_train.data[batch_indices])
            y_batch = Tensor(y_train.data[batch_indices])

            # Forward pass.
            y_pred = model(X_batch)
            loss = criterion(y_pred, y_batch)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # np.sum handles Python scalars, 0-d arrays and arrays uniformly.
            # The original branch `loss.data.item() if np.isscalar(loss.data)`
            # raised AttributeError when loss.data was a plain Python float
            # (floats have no .item()).
            total_loss += float(np.sum(loss.data))

        # Average loss over the batches actually run this epoch.
        avg_loss = total_loss / n_batches
        epoch_losses.append(avg_loss)

        # Progress report every 5 epochs.
        if (epoch + 1) % 5 == 0:
            print(f"Epoch {epoch+1}/{num_epochs}, Loss: {avg_loss:.4f}")

    return epoch_losses

def evaluate(model, test_data):
    """
    Evaluate `model` on a held-out set.

    Args:
        model: callable mapping a Tensor of inputs to a prediction Tensor.
        test_data: (X_test, y_test) pair; both expose a numpy .data array.

    Returns:
        (mse, predictions): mean squared error as a float-like scalar and
        the raw prediction array.
    """
    features, targets = test_data
    predictions = model(features)

    # Mean squared error over all elements.
    residuals = predictions.data - targets.data
    mse = np.mean(residuals ** 2)
    return mse, predictions.data

def visualize_training(losses):
    """
    Plot the per-epoch training losses and save the figure as
    'training_loss.png' in the working directory.
    """
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.plot(losses, label='Training Loss')
    ax.set_title('Loss During Training')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Loss')
    ax.legend()
    ax.grid(True)
    fig.savefig('training_loss.png')
    plt.close(fig)
    print("损失曲线已保存为 'training_loss.png'")

def visualize_predictions(X_test, y_test, y_pred, norm_params):
    """
    Plot model predictions against the ground truth, with the de-normalized
    first input feature on the x-axis, and save the figure as
    'predictions_x_axis.png'.

    Args:
        X_test: normalized test inputs (numpy array).
        y_test: normalized test targets (numpy array).
        y_pred: normalized model predictions (numpy array).
        norm_params: (X_mean, X_std, y_mean, y_std) from generate_data.
    """
    X_mean, X_std, y_mean, y_std = norm_params

    # Undo normalization; only the first feature dimension is plotted.
    x_vals = X_test * X_std[0] + X_mean[0]
    if x_vals.ndim > 1:
        x_vals = x_vals[:, 0]  # keep a 1-D x-axis

    y_true = y_test * y_std + y_mean
    if y_true.ndim > 1:
        y_true = y_true.reshape(-1)  # flatten targets to 1-D

    y_hat = y_pred * y_std + y_mean
    if y_hat.ndim > 1:
        y_hat = y_hat.reshape(-1)  # flatten predictions to 1-D

    # Sort by x so the scatter points align visually with the curve.
    order = np.argsort(x_vals)
    x_sorted = x_vals[order]
    y_true_sorted = y_true[order]
    y_hat_sorted = y_hat[order]

    # Dense grid for the reference sin(x) curve.
    grid = np.linspace(-np.pi, np.pi, 1000)
    reference = np.sin(grid)

    fig, ax = plt.subplots(figsize=(12, 6))

    # Ground truth and predictions as scatter plots.
    ax.scatter(x_sorted, y_true_sorted, color='blue', alpha=0.5, label='实际数据')
    ax.scatter(x_sorted, y_hat_sorted, color='red', alpha=0.5, label='预测数据')

    # True underlying function.
    ax.plot(grid, reference, 'g-', linewidth=2, label='sin(x)函数')

    ax.set_title('预测结果对比真实函数')
    ax.set_xlabel('X值')
    ax.set_ylabel('Y值')
    ax.legend()
    ax.grid(True)
    fig.savefig('predictions_x_axis.png')
    plt.close(fig)
    print("预测结果已保存为 'predictions_x_axis.png'")

def main():
    """End-to-end demo: generate data, train SimpleNet, report MSE, save plots."""
    # Fix the RNG seed so results are reproducible.
    np.random.seed(42)

    # Dataset and normalization statistics.
    train_data, test_data, normalization_params = generate_data(n_samples=1000)

    # Model, loss and optimizer.
    model = SimpleNet()
    criterion = MSELoss()
    optimizer = SGD(model.parameters(), lr=0.04, momentum=0.9)

    # Baseline performance before training.
    initial_mse, _ = evaluate(model, test_data)
    print(f"初始测试集MSE: {initial_mse:.4f}")

    # Train and time the run.
    print("开始训练...")
    start_time = time.time()
    losses = train(model, criterion, optimizer, train_data, num_epochs=1000, batch_size=16)
    end_time = time.time()
    print(f"训练完成，耗时: {end_time - start_time:.2f}秒")

    # Final performance.
    final_mse, y_pred = evaluate(model, test_data)
    print(f"训练后测试集MSE: {final_mse:.4f}")

    # Loss curve.
    visualize_training(losses)

    # Predictions vs. ground truth.
    X_test, y_test = test_data
    visualize_predictions(X_test.data, y_test.data, y_pred, normalization_params)


if __name__ == "__main__":
    main()