# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

from typing import Any, Callable, Optional

import torch

from ..utils import run_graph_one_epoch
from .base_quantizer import BaseQuantizer


class PostTrainingStaticQuantizer(BaseQuantizer):
    """Post-training static quantizer.

    Calibrates the quantized graph by running inference over a calibration
    dataloader (no gradient updates), letting the observers inserted by
    ``BaseQuantizer`` collect activation statistics.
    """

    def __init__(self, dataloader, preprocess: Optional[Callable[[Any], Any]] = None, **kwargs):
        """
        Args:
            dataloader: Iterable yielding calibration batches; used as the
                default data source for :meth:`finetune`.
            preprocess: Optional callable applied to each batch before it is
                fed to the graph (e.g. to unpack/move tensors). ``None`` means
                batches are used as-is.
            **kwargs: Forwarded unchanged to ``BaseQuantizer.__init__``.
        """
        super().__init__(**kwargs)
        self.dataloader = dataloader
        self.preprocess = preprocess

    @torch.no_grad()
    def finetune(self, name="Finetune", dataloader=None):
        """Run one calibration epoch over the graph without tracking gradients.

        Args:
            name: Label passed to the base-class hook and to the epoch runner
                (presumably used for progress display — confirm in
                ``run_graph_one_epoch``).
            dataloader: Optional override; falls back to the dataloader given
                at construction time when ``None``.

        Returns:
            Whatever ``run_graph_one_epoch`` returns for this epoch.
        """
        # Let the base class perform its pre-finetune bookkeeping first.
        super().finetune(name)
        dataloader = self.dataloader if dataloader is None else dataloader

        return run_graph_one_epoch(
            self.executor, self.graph, dataloader, self.preprocess, name
        )
