#include "common.h"

// Simple neural-network model definition
struct SimpleNet : torch::nn::Module {
  SimpleNet(int input_size, int hidden_size, int output_size) {
    fc1 = register_module("fc1", torch::nn::Linear(input_size, hidden_size));
    fc2 = register_module("fc2", torch::nn::Linear(hidden_size, output_size));
  }

  torch::Tensor forward(torch::Tensor x) {
    x = torch::relu(fc1->forward(x));
    x = fc2->forward(x);
    return x;
  }

  torch::nn::Linear fc1{nullptr}, fc2{nullptr};
};

void test_neural_network() {
  pytorch_study::Timer timer("Neural Network Test");

  // 1. 使用内置模块
  std::cout << "1. Using built-in modules:" << std::endl;
  auto linear = torch::nn::Linear(10, 5);
  auto input = torch::randn({32, 10});
  auto output = linear->forward(input);
  prtts2(input, "Input");
  prtts2(output, "Linear output");

  // 2. 自定义网络
  std::cout << "\n2. Custom neural network:" << std::endl;
  SimpleNet net(10, 20, 1);
  auto net_input = torch::randn({5, 10});
  auto net_output = net.forward(net_input);
  prtts2(net_output, "Network output");

  // 3. 序列容器
  std::cout << "\n3. Using sequential container:" << std::endl;
  auto seq_net =
      torch::nn::Sequential(torch::nn::Linear(10, 50), torch::nn::ReLU(),
                            torch::nn::Linear(50, 10), torch::nn::Softmax(1));
  auto seq_output = seq_net->forward(net_input);
  prtts2(seq_output, "Sequential output");

  // 4. 优化器使用
  std::cout << "\n4. Optimizer usage:" << std::endl;
  torch::optim::SGD optimizer(net.parameters(), torch::optim::SGDOptions(0.01));

  // 模拟训练步骤
  auto target = torch::randn({5, 1});
  auto loss = torch::mse_loss(net_output, target);

  optimizer.zero_grad();
  loss.backward();
  optimizer.step();

  std::cout << "Loss: " << loss.item<float>() << std::endl;
}