# Copyright 2024 Ant Group Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import torch
import torch.nn as nn

from benchmark_examples.autoattack.attacks.base import AttackCase
from secretflow import tune
from secretflow.ml.nn.sl.attacks.exploitattack_torch import ExploitAttack


class BinaryClassifier(nn.Module):
    """Small MLP used as the exploit attack's surrogate classifier.

    Maps ``input_dim`` features through a 64-unit ReLU hidden layer to
    ``output_dim`` sigmoid-activated outputs in (0, 1).
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Attribute names kept identical to preserve state_dict keys.
        self.layer1 = nn.Linear(input_dim, 64)
        self.relu = nn.ReLU()
        self.layer2 = nn.Linear(64, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return per-sample probabilities for a batch of inputs."""
        hidden = self.relu(self.layer1(x))
        return self.sigmoid(self.layer2(hidden))


def exploit_auxiliary_surrogate_model(input_dim, output_dim=1):
    """Return a zero-argument factory that builds the surrogate model.

    Args:
        input_dim: feature width fed to the surrogate classifier.
        output_dim: number of model outputs; defaults to 1 (binary label).

    Returns:
        A callable producing a fresh ``BinaryClassifier`` on each call.
    """
    return lambda: BinaryClassifier(input_dim, output_dim)


class ExploitAttackCase(AttackCase):
    """Benchmark case that runs the exploit label-inference attack on an SL app."""

    def _attack(self):
        """Train the SL application with the exploit callback attached.

        Returns:
            A ``(history, attack_metrics)`` tuple: the training history and
            the metrics collected by the exploit callback.
        """
        self.app.prepare_data()
        neg_cnt, pos_cnt = self.app.exploit_label_counts()
        sample_cnt = neg_cnt + pos_cnt
        # Wrap the raw counts as tensors so the prior and entropy below
        # are tensor-valued, as ExploitAttack expects.
        pos_t = torch.tensor(pos_cnt)
        total_t = torch.tensor(sample_cnt)

        pos_prior = pos_t / total_t
        prior_vec = torch.tensor([1 - pos_prior, pos_prior], dtype=torch.float32)

        # Shannon entropy of the binary label distribution.
        # NOTE(review): this is NaN if either class count is zero — presumably
        # the benchmark datasets always contain both classes; confirm upstream.
        label_entropy = (
            -pos_prior * torch.log(pos_prior)
            - (1 - pos_prior) * torch.log(1 - pos_prior)
        )
        exploit_callback = ExploitAttack(
            attack_party=self.app.device_f,
            batch_size=self.app.train_batch_size,
            epochs=1,
            surrogate_model=exploit_auxiliary_surrogate_model(self.app.hidden_size, 1),
            y_pri=prior_vec.unsqueeze(0),  # add leading batch axis
            H_y=label_entropy,
        )
        history = self.app.train(exploit_callback)
        attack_metrics = exploit_callback.get_attack_metrics()
        logging.warning(
            f"RESULT: {type(self.app).__name__} exploit attack metrics = {attack_metrics}"
        )
        return history, attack_metrics

    def attack_search_space(self):
        """Hyper-parameter grid explored by the auto-attack tuner."""
        return {
            'alpha_acc': tune.search.grid_search([0.8, 1]),  # 0 - 1
            'alpha_grad': tune.search.grid_search([0.01, 0.1]),  # 0 -1  log
            'alpha_kl': tune.search.grid_search([0.01, 0.1]),  # 0-1
        }

    def metric_name(self):
        """Name of the metric the tuner optimizes."""
        return 'accuracy'

    def metric_mode(self):
        """Higher accuracy is better."""
        return 'max'