# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner that uses xgboost as cost model"""

import logging
import tempfile
import time

import numpy as np

from tvm.autotvm.measure.measure import MeasureResult

from ..measure import MeasureInput, create_measure_batch
from ..utils import format_si_prefix

from ..env import GLOBAL_SCOPE
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer

# Module-level logger shared by the autotvm tuners; its level is toggled at
# runtime by `tune()` when too many measurement errors occur.
logger = logging.getLogger("autotvm")

class XGBTuner(ModelBasedTuner):
    """Tuner that uses xgboost as cost model

    Parameters
    ----------
    task: Task
        The tuning task
    plan_size: int
        The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
        and do planing for the next `plan_size` trials.
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
        'itervar' is more accurate but 'knob' is much faster.
        There are some constraints on 'itervar', if you meet
        problems with feature extraction when using 'itervar',
        you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
        'itervar' and 'curve' has better transferability,
        'knob' is faster.

        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
        The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
        The cost model predicts relative rank score.

    num_threads: int, optional
        The number of threads.

    optimizer: str or ModelOptimizer, optional
        If is 'sa', use a default simulated annealing optimizer.
        Otherwise it should be a ModelOptimizer object.

    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick batch_size of them according to the diversity metric.

    log_interval: int = 50
        The verbose level.
        If is 0, output nothing.
        Otherwise, output debug information every `log_interval` iterations.
    """

    def __init__(
        self,
        task,
        plan_size=64,
        feature_type="itervar",
        loss_type="rank",
        num_threads=None,
        optimizer="sa",
        diversity_filter_ratio=None,
        log_interval=50,
    ):
        # The cost model logs half as often as the tuner so the two logs interleave readably.
        cost_model = XGBoostCostModel(
            task,
            feature_type=feature_type,
            loss_type=loss_type,
            num_threads=num_threads,
            log_interval=log_interval // 2,
        )
        if optimizer == "sa":
            optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
        else:
            # Fixed message: adjacent string literals previously concatenated to
            # "...a supported name stringor a ModelOptimizer object."
            assert isinstance(
                optimizer, ModelOptimizer
            ), "Optimizer must be a supported name string or a ModelOptimizer object."

        super(XGBTuner, self).__init__(
            task, cost_model, optimizer, plan_size, diversity_filter_ratio
        )

    def tune(self, n_trial, measure_option, early_stopping=None, callbacks=(), si_prefix="G"):
        """Begin tuning

        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value of autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        si_prefix: str
            One of tvm.autotvm.utils.SI_PREFIXES. The SI prefix to use when reporting FLOPS.
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        n_parallel = getattr(measure_batch, "n_parallel", 1)
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping

        # Validate si_prefix arg early, before any expensive measurement happens.
        format_si_prefix(0, si_prefix)

        # Remember the logger level so we can restore it after a debug-mode switch below.
        old_level = logger.level

        GLOBAL_SCOPE.in_tuning = True
        i = error_ct = 0
        errors = []
        while i < n_trial:
            if not self.has_next():
                break

            configs = self.next_batch(min(n_parallel, n_trial - i))

            inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]
            results = measure_batch(inputs)

            # keep best config
            for k, (inp, res) in enumerate(zip(inputs, results)):
                config = inp.config
                if res.error_no == 0:
                    flops = inp.task.flop / np.mean(res.costs)
                    # A successful measurement resets the consecutive-error counter.
                    error_ct = 0
                else:
                    flops = 0
                    error_ct += 1
                    error = res.costs[0]
                    errors.append(error if isinstance(error, str) else str(error))

                if flops > self.best_flops:
                    self.best_flops = flops
                    self.best_config = config
                    self.best_measure_pair = (inp, res)
                    self.best_iter = i + k

                logger.debug(
                    "No: %d\t%sFLOPS: %.2f/%.2f\tresult: %s\t%s",
                    i + k + 1,
                    si_prefix,
                    format_si_prefix(flops, si_prefix),
                    format_si_prefix(self.best_flops, si_prefix),
                    res,
                    config,
                )

            i += len(results)
            # Remaining trials before the early-stopping window closes.
            self.ttl = min(early_stopping + self.best_iter, n_trial) - i

            self.update(inputs, results)
            for callback in callbacks:
                callback(self, inputs, results)

            if i >= self.best_iter + early_stopping:
                logger.debug("Early stopped. Best iter: %d.", self.best_iter)
                break

            if error_ct > 150:
                # Ensure a root handler exists so the debug output is actually emitted.
                logging.basicConfig()
                logger.warning("Too many errors happen in the tuning. Switching to debug mode.")
                logger.setLevel(logging.DEBUG)
            else:
                logger.setLevel(old_level)

        if error_ct == i:
            # Every single trial failed: dump the collected error messages to a temp file.
            _, f = tempfile.mkstemp(prefix="tvm_tuning_errors_", suffix=".log", text=True)
            with open(f, "w") as file:
                file.write("\n".join(errors))
            # Use the module logger (not the root logger) for consistency with the
            # rest of this file and so the message respects the logger's handlers.
            logger.warning(
                "Could not find any valid schedule for task %s. "
                "A file containing the errors has been written to %s.",
                self.task,
                f,
            )
        GLOBAL_SCOPE.in_tuning = False
        del measure_batch

    def rank(self, inputs):
        """Rank candidate inputs by a hand-crafted operational-intensity model,
        best (fastest predicted) first.

        For every candidate config, features are derived from the conv2d task
        arguments and the config's split knobs (arithmetic intensity, grid /
        warp / thread counts, unrolling knobs, shared-memory footprint) and fed
        to ``self.fast_model``, which predicts the reciprocal of run time.

        NOTE(review): ``self.fast_model`` is not defined in this file — it is
        assumed to be attached to the tuner elsewhere; confirm before use.
        NOTE(review): assumes ``task.args`` follows the conv2d layout
        (data, weight, stride, padding) with 4-D NCHW shapes and a square
        kernel (the output size ``y`` is reused for both spatial axes).

        Parameters
        ----------
        inputs: Array of autotvm.measure.MeasureInput
            The inputs for measurement

        Returns
        -------
        inputs_rank: list of autotvm.measure.MeasureInput
            ``inputs`` reordered so the best-predicted candidate comes first.
        flops: numpy.ndarray
            Predicted FLOPS per candidate, in the original ``inputs`` order.
        """
        rows = []
        for inp in inputs:
            cfg = inp.config.to_json_dict()
            data_shape = inp.task.args[0][1]
            data = data_shape[0] * data_shape[1] * data_shape[2] * data_shape[3]
            weight_shape = inp.task.args[1][1]
            weight = weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]
            stride = inp.task.args[2][0]
            padding = inp.task.args[3][0]
            # Output spatial extent (square input/kernel assumed).
            y = (data_shape[2] - weight_shape[2] + 2 * padding) // stride + 1

            # Each split knob stores [outer, f1, f2, f3]; recover the outer
            # factor from the axis extent divided by the inner factors.
            tile_f = cfg["e"][0][2]
            tile_f[0] = weight_shape[0] / (tile_f[1] * tile_f[2] * tile_f[3])
            tile_y = cfg["e"][1][2]
            tile_y[0] = y // (tile_y[1] * tile_y[2] * tile_y[3])
            tile_x = cfg["e"][2][2]
            tile_x[0] = y // (tile_x[1] * tile_x[2] * tile_x[3])
            tile_rc = cfg["e"][3][2]
            tile_rc[0] = data_shape[1] // tile_rc[1]
            tile_ry = cfg["e"][4][2]
            tile_ry[0] = weight_shape[2] // tile_ry[1]
            tile_rx = cfg["e"][5][2]
            # BUG FIX: original used weight_shape[2] // tile_ry[1] (copy-paste
            # from tile_ry); the rx split must use the kernel-width axis and
            # its own inner factor.
            tile_rx[0] = weight_shape[3] // tile_rx[1]

            threads = tile_f[2] * tile_y[2] * tile_x[2]
            warps = threads // 32
            grids = tile_f[0] * tile_y[0] * tile_x[0]
            # Multiply-accumulate count per thread-block iteration (x2 for mul+add).
            compute_op = (
                tile_f[3] * tile_y[3] * tile_x[3]
                * tile_rc[1] * tile_ry[1] * tile_rx[1]
                * tile_f[1] * tile_y[1] * tile_x[1] * 2
            )
            # Elements of the padded input / kernel staged per thread (+1 rounds up).
            pad_shared = data // (
                threads * tile_y[0] * tile_x[0] * tile_rc[0] * tile_ry[0] * tile_rx[0]
            ) + 1
            kernel_shared = weight // (
                threads * tile_f[0] * tile_rc[0] * tile_ry[0] * tile_rx[0]
            ) + 1
            # Shared-memory footprint in KiB (4 bytes per float32 element).
            shared = (
                (pad_shared + kernel_shared) * 4
                * tile_rc[0] * tile_ry[0] * tile_rx[0] * threads / 1024
            )
            # Arithmetic intensity: compute per byte loaded into shared memory.
            imax = compute_op / ((pad_shared + kernel_shared) * 4)
            auto_unroll = cfg["e"][6][2]
            unroll_explicit = cfg["e"][7][2]

            rows.append([imax, grids, warps, threads, auto_unroll, unroll_explicit, shared])

        # Build the feature matrix in one shot instead of row-stacking in a
        # loop (the original grew the array per iteration and then deleted a
        # dummy first row — quadratic work).
        features = np.asarray(rows, dtype=float)
        # The model predicts the reciprocal of run time; larger is better.
        reciprocal = np.abs(self.fast_model.predict(features))
        costtime = 1 / (reciprocal + 1e-9)
        flops = inputs[0].task.flop / costtime
        # Sort descending by predicted reciprocal time (fastest first).
        order = np.argsort(-reciprocal)
        inputs_rank = [inputs[idx] for idx in order]
        return inputs_rank, flops

    def predict(self, k, kr, flops_rank, res):
        """Synthesize a MeasureResult for an unmeasured config in a batch.

        The cost of candidate ``k`` is extrapolated from the measured cost of
        candidate ``kr``, scaled by the ratio of their predicted FLOPS: a
        candidate predicted twice as fast gets half the measured cost.

        Parameters
        ----------
        k: int
            Index (into ``flops_rank``) of the config whose cost is predicted.
        kr: int
            Index of the config that was actually measured.
        flops_rank: array-like of float
            Predicted FLOPS per candidate (e.g. the second return of :meth:`rank`).
        res: autotvm.measure.MeasureResult
            The real measurement result of candidate ``kr``.

        Returns
        -------
        autotvm.measure.MeasureResult
            A synthetic result carrying the extrapolated cost, error_no 0,
            and the current wall-clock timestamp.
        """
        flops_k = flops_rank[k]
        flops_kr = flops_rank[kr]
        cost_kr = np.mean(res.costs)
        # cost scales inversely with predicted FLOPS: cost_k / cost_kr = flops_kr / flops_k
        cost_k = cost_kr * flops_kr / flops_k
        return MeasureResult((cost_k,), 0, 0, time.time())
