#include "common.h"

// Demo 1: build z = sum(x*x + 2) and verify autograd computes dz/dx = 2x.
static void basic_autograd_demo() {
  std::cout << "1. Basic autograd:" << std::endl;
  auto x = torch::tensor({1.0, 2.0, 3.0}, torch::requires_grad());
  auto y = x * x + 2;
  auto z = y.sum();

  z.backward();
  std::cout << "x: " << x << std::endl;
  std::cout << "y: " << y << std::endl;
  std::cout << "z: " << z << std::endl;
  std::cout << "x.grad: " << x.grad() << std::endl;  // expect [2, 4, 6]
}

// Demo 2: fit y = 2x + 1 (plus noise) with a hand-written SGD loop,
// updating the parameters manually instead of using torch::optim.
static void linear_regression_demo() {
  std::cout << "\n2. Linear regression example:" << std::endl;

  // Synthetic data: 100 points of y = 2x + 1 with Gaussian noise.
  auto X = torch::linspace(-1, 1, 100).reshape({-1, 1});
  auto Y = 2 * X + 1 + 0.1 * torch::randn(X.sizes());

  // Trainable parameters (leaf tensors tracked by autograd).
  auto w = torch::randn({1, 1}, torch::requires_grad());
  auto b = torch::zeros({1, 1}, torch::requires_grad());

  constexpr double kLearningRate = 0.01;
  constexpr int kEpochs = 100;

  for (int epoch = 0; epoch < kEpochs; ++epoch) {
    auto predictions = torch::matmul(X, w) + b;
    auto loss = torch::mse_loss(predictions, Y);

    loss.backward();

    // Plain SGD step; NoGradGuard keeps the in-place parameter updates
    // out of the autograd graph.
    {
      torch::NoGradGuard no_grad;
      w -= kLearningRate * w.grad();
      b -= kLearningRate * b.grad();

      // Reset accumulated gradients before the next backward pass.
      w.grad().zero_();
      b.grad().zero_();
    }

    if (epoch % 20 == 0) {
      std::cout << "Epoch " << epoch << ", Loss: " << loss.item<float>()
                << std::endl;
    }
  }

  // Should converge near the true values w = 2, b = 1.
  std::cout << "Final w: " << w << ", b: " << b << std::endl;
}

// Exercises LibTorch autograd: a basic backward() pass and a manual
// linear-regression training loop. Prints results to stdout; the Timer
// reports wall-clock time for the whole test on destruction.
void test_autograd_examples() {
  pytorch_study::Timer timer("Autograd Test");

  basic_autograd_demo();
  linear_regression_demo();
}