#include "common.h"

// 自定义操作示例函数
torch::Tensor custom_activation(torch::Tensor x) {
  // 实现一个简单的自定义激活函数
  return torch::tanh(x) + 0.1 * x;
}

// Custom loss: mean-squared error between predictions and targets, plus an
// L1-norm penalty on the predictions (weight 0.01) as a regularization term.
torch::Tensor custom_loss_function(torch::Tensor predictions,
                                   torch::Tensor targets) {
  const auto data_term = torch::mse_loss(predictions, targets);
  const auto l1_term = torch::norm(predictions, 1);
  return data_term + 0.01 * l1_term;
}

// Demo driver: exercises the custom activation, the custom loss, a
// where-based thresholding op, and autograd through the custom activation.
// Output goes to stdout; the Timer reports elapsed time on scope exit.
void test_custom_operators() {
  pytorch_study::Timer timer("Custom Operators Test");

  // 1. Custom activation function on a small linspace.
  std::cout << "1. Custom activation function:" << std::endl;
  const auto inputs = torch::linspace(-2, 2, 10);
  const auto activated = custom_activation(inputs);
  std::cout << "Input: " << inputs << std::endl;
  std::cout << "Custom activation output: " << activated << std::endl;

  // 2. Custom loss on random predictions/targets.
  std::cout << "\n2. Custom loss function:" << std::endl;
  const auto predictions = torch::randn({5, 1});
  const auto targets = torch::randn({5, 1});
  const auto loss_value = custom_loss_function(predictions, targets);
  std::cout << "Custom loss: " << loss_value.item<float>() << std::endl;

  // 3. Custom tensor operation: conditional thresholding.
  std::cout << "\n3. Custom tensor operations:" << std::endl;
  const auto sample = torch::randn({3, 4});

  // torch::where keeps values above 0.5 and zeroes everything else.
  const auto thresholded_sample =
      torch::where(sample > 0.5, sample, torch::zeros_like(sample));
  prtts2(sample, "Original tensor");
  prtts2(thresholded_sample, "Thresholded tensor");

  // 4. Gradient check: backprop through the custom activation.
  std::cout << "\n4. Gradient checking with custom function:" << std::endl;
  auto param = torch::tensor({1.0, 2.0, 3.0}, torch::requires_grad());
  custom_activation(param).sum().backward();
  std::cout << "Parameter: " << param << std::endl;
  std::cout << "Gradient: " << param.grad() << std::endl;
}