# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner that uses xgboost as cost model"""
import time
import math
import numpy as np
from ..env import GLOBAL_SCOPE
import logging
from ..measure import MeasureInput, create_measure_batch,MeasureResult
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer
import pickle
logger = logging.getLogger('autotvm')

class fastestTuner(ModelBasedTuner):
    """Tuner that uses xgboost as cost model.

    Ranks the candidate inputs of each batch with a hand-built
    arithmetic-intensity heuristic and measures them from best to worst;
    the first config in a batch that runs successfully is taken as the
    batch's best, and the remaining configs receive a penalty cost
    instead of being measured on hardware.

    Parameters
    ----------
    task: Task
        The tuning task
    plan_size: int
        The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
        and do planing for the next `plan_size` trials.
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
                                'itervar' is more accurate but 'knob' is much faster.
                                There are some constraints on 'itervar', if you meet
                                problems with feature extraction when using 'itervar',
                                you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
                               'itervar' and 'curve' has better transferability,
                               'knob' is faster.
        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
                     The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
                     The cost model predicts relative rank score.
    num_threads: int, optional
        The number of threads.
    optimizer: str or ModelOptimizer, optional
        If is 'sa', use a default simulated annealing optimizer.
        Otherwise it should be a ModelOptimizer object.
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick batch_size of them according to the diversity metric.
    log_interval: int, optional
        The verbose level.
        If is 0, output nothing.
        Otherwise, output debug information every `verbose` iterations.
    """
    def __init__(self, task, plan_size=64*2,
                 feature_type='itervar', loss_type='rank', num_threads=8,
                 optimizer='sa', diversity_filter_ratio=None, log_interval=50):
        # Whether a batch measures only until the first working config (True)
        # or runs every config in the batch.
        self.control = True
        # Counts update() calls; intended to refresh the trial list every few
        # batches (that refresh branch is currently disabled, see update()).
        self.count = 0
        cost_model = XGBoostCostModel(task,
                                      feature_type=feature_type,
                                      loss_type=loss_type,
                                      num_threads=num_threads,
                                      log_interval=log_interval // 2)
        if optimizer == 'sa':
            optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
        else:
            assert isinstance(optimizer, ModelOptimizer), "Optimizer must be " \
                                                          "a supported name string" \
                                                          "or a ModelOptimizer object."

        # Load a pre-trained classifier model.  The file handle was
        # previously leaked; a context manager guarantees it is closed.
        # NOTE(review): hard-coded absolute path, and pickle.loads is unsafe
        # on untrusted input -- confirm this model file is trusted.
        with open('/home/ubuntu/MyFiles/tvm/python/tvm/autotvm/tuner/rfc.txt', 'rb') as modelfile:
            self.classify_model = pickle.loads(modelfile.read())

        super(fastestTuner, self).__init__(task, cost_model, optimizer,
                                           plan_size, diversity_filter_ratio)

    def rank(self, inputs):
        """Rank inputs with an arithmetic-intensity heuristic, best first.

        For every config the method estimates compute operations per byte
        staged through shared memory (``imax``) and scales it by occupancy
        factors derived from the grid size (``alpha``) and the number of
        warps per block (``beta``).

        Parameters
        ----------
        inputs : Array of autotvm.measure.MeasureInput
            The inputs for measurement.

        Returns
        -------
        inputs_rank : list of MeasureInput
            ``inputs`` sorted by descending predicted score.
        flops_rank : list of float
            The matching predicted scores, in the same order.
        """
        flops = []
        for inp in inputs:
            d = inp.config.to_json_dict()
            # NOTE(review): assumes a conv2d-like task -- args[0]/args[1]
            # carry the data/weight shapes (4-D, NCHW-style) and
            # args[2]/args[3] hold stride/padding; confirm against the task.
            data_shape = inp.task.args[0][1]
            data = data_shape[0]*data_shape[1]*data_shape[2]*data_shape[3]
            weight_shape = inp.task.args[1][1]
            weight = weight_shape[0]*weight_shape[1]*weight_shape[2]*weight_shape[3]
            stride = inp.task.args[2][0]
            padding = inp.task.args[3][0]
            # Output spatial extent (assumes square input and kernel).
            y = (data_shape[2]-weight_shape[2]+2*padding)//stride+1
            # Each d["e"][i][2] is a split-factor list; element 0 is
            # recomputed as the outermost factor implied by the others.
            tile_f = d["e"][0][2]
            tile_f[0] = weight_shape[0]/(tile_f[1]*tile_f[2]*tile_f[3])
            tile_y = d["e"][1][2]
            tile_y[0] = y//(tile_y[1]*tile_y[2]*tile_y[3])
            tile_x = d["e"][2][2]
            tile_x[0] = y//(tile_x[1]*tile_x[2]*tile_x[3])
            tile_rc = d["e"][3][2]
            tile_rc[0] = data_shape[1]//tile_rc[1]
            tile_ry = d["e"][4][2]
            tile_ry[0] = weight_shape[2]//tile_ry[1]
            tile_rx = d["e"][5][2]
            tile_rx[0] = weight_shape[2]//tile_rx[1]
            warps = tile_f[2]*tile_y[2]*tile_x[2]//32
            grids = tile_f[0]*tile_y[0]*tile_x[0]
            # Multiply-accumulate ops per thread (x2 for mul + add).
            compute_op = tile_f[3]*tile_y[3]*tile_x[3]*tile_rc[1]*tile_ry[1]*tile_rx[1]*tile_f[1]*tile_y[1]*tile_x[1]*2
            # Shared-memory footprint of the input tile per thread.
            pad_shared = data/(tile_f[2]*tile_y[2]*tile_x[2]*tile_y[0]*tile_x[0]*tile_rc[0]*tile_ry[0]*tile_rx[0])
            if pad_shared < 1:
                pad_shared = 1
            else:
                pad_shared = math.ceil(pad_shared)+1
            kernel_shared = weight/(tile_f[2]*tile_y[2]*tile_x[2]*tile_f[0]*tile_rc[0]*tile_ry[0]*tile_rx[0])
            kernel_shared = math.ceil(kernel_shared)
            # Arithmetic intensity: ops per byte staged through shared memory
            # (4 bytes per element, presumably float32 -- TODO confirm).
            imax = compute_op/((pad_shared+kernel_shared)*4)

            # alpha: grid-size factor, saturates at 100 thread blocks.
            if grids >= 100:
                alpha = 1
            else:
                alpha = grids/100

            # beta: warps-per-block factor, peaking at 4 warps and
            # penalizing both under- and over-subscription.
            if warps == 0:
                beta = 0
            elif 1 <= warps <= 4:
                beta = (warps-1)/3
            else:
                beta = 1/(warps-3)
            flops.append(imax*alpha*beta)

        # Sort descending by predicted score.
        order = np.argsort(-np.array(flops))
        inputs_rank = [inputs[i] for i in order]
        flops_rank = [flops[i] for i in order]
        return inputs_rank, flops_rank

    def predict(self, k, kr, flops_rank, res):
        """Extrapolate a runtime for an unmeasured config in a batch.

        The predicted cost is the mean, over the measured configs, of
        ``measured_cost * measured_score / this_score`` -- i.e. costs are
        assumed inversely proportional to the heuristic scores.

        Parameters
        ----------
        k : int
            Index (in the ranked batch) of the config to predict.
        kr : list of int
            Indices of configs that were actually measured.  Must be
            non-empty, otherwise this divides by zero.
        flops_rank : list of float
            Heuristic scores produced by :meth:`rank`.
        res : list
            Measured results, aligned with ``kr``.
            NOTE(review): ``res[i][0].costs`` expects each entry to be a
            sequence wrapping a MeasureResult; the (currently disabled)
            call site in tune() passes bare MeasureResults -- verify
            before re-enabling.

        Returns
        -------
        MeasureResult
            A synthetic result carrying the predicted cost.
        """
        cost_k = 0
        for i in range(len(kr)):
            flops_k = flops_rank[k]
            flops_kr = flops_rank[kr[i]]
            cost_kr = np.mean(res[i][0].costs)
            cost_k += cost_kr*flops_kr/flops_k
        cost_k /= len(kr)
        result = MeasureResult((cost_k,), 0, 0, time.time())
        return result

    def next_batch(self, batch_size):
        """Return the next ``batch_size`` unvisited configs to measure.

        Walks the model-proposed trial list first; near its end (within the
        last 5% of ``plan_size``) it falls back to epsilon-greedy random
        picks from the whole search space.
        """
        ret = []

        counter = 0
        while counter < batch_size:
            if len(self.visited) >= len(self.space):
                break

            # Skip trials that have already been measured.
            while self.trial_pt < len(self.trials):
                index = self.trials[self.trial_pt]
                if index not in self.visited:
                    break
                self.trial_pt += 1

            if self.trial_pt >= len(self.trials) - int(0.05 * self.plan_size):
                # if the trial list is empty or
                # the tuner is doing the last 5% trials (e-greedy), choose randomly
                index = np.random.randint(len(self.space))
                while index in self.visited:
                    index = np.random.randint(len(self.space))

            ret.append(self.space.get(index))
            self.visited.add(index)

            counter += 1
        return ret

    def update(self, inputs, results):
        """Feed measured results back into the XGBoost cost model.

        Successful measurements contribute their achieved FLOPS as training
        targets; failed ones contribute 0.0 so the model learns to avoid
        them.  Once ``plan_size`` new samples have accumulated (and at
        least one succeeded), the model is refit and a new trial list is
        planned.
        """
        self.count += 1
        for inp, res in zip(inputs, results):
            index = inp.config.index
            if res.error_no == 0:
                self.xs.append(index)
                flops = inp.task.flop / np.mean(res.costs)
                self.flops_max = max(self.flops_max, flops)
                self.ys.append(flops)
            else:
                self.xs.append(index)
                self.ys.append(0.0)

        # if we have enough new training samples
        if len(self.xs) >= self.plan_size * (self.train_ct + 1) \
                and self.flops_max > 1e-6:
            self.cost_model.fit(self.xs, self.ys, self.plan_size)
            maximums = self.model_optimizer.find_maximums(
                self.cost_model, self.plan_size, self.visited)

            print('use model_optimizer to find')
            self.trials = maximums
            self.trial_pt = 0
            self.train_ct += 1

    def tune(self, n_trial, measure_option, early_stopping=None, callbacks=()):
        """Begin tuning

        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value ot autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        n_parallel = getattr(measure_batch, 'n_parallel', 1)
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping

        old_level = logger.level

        GLOBAL_SCOPE.in_tuning = True
        i = error_ct = 0
        while i < n_trial:
            if not self.has_next():
                break

            # NOTE(review): batch size is hard-coded to 48 instead of the
            # usual min(n_parallel, n_trial - i).
            configs = self.next_batch(48)

            inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]
            # Rank the batch with the arithmetic-intensity model (best
            # predicted first).  Configs are measured in that order; once
            # one runs successfully, the rest get a penalty cost instead.
            input_ranks, flops_rank = self.rank(inputs)
            k = 0            # index of the current input in the ranked batch
            kr = []          # indices of inputs that ran successfully
            input_run = []   # inputs that were actually measured
            res_run = []     # results of the measured inputs
            flag = False     # True once we switch from measuring to predicting
            results = []
            num_run = 0      # number of inputs measured in this batch
            error_num = 0    # number of failed measurements in this batch
            for inp in input_ranks:
                if flag == False:
                    res = measure_batch([inp,])  # measure the current input
                    results.append(res[0])
                else:
                    # Predict the remaining unmeasured inputs.
                    # k: current index; kr: indices already measured.
                    if not kr:
                        continue
                    #res_tmp=self.predict(k,kr,flops_rank,res_run)
                    # Assign a large penalty cost to everything not measured.
                    res_tmp = MeasureResult((100000,), 0, 0, time.time())
                    results.append(res_tmp)
                    k += 1
                    #continue
                # NOTE(review): after `flag` flips, `res` still holds the
                # last *measured* result, so the bookkeeping below reuses a
                # stale result for predicted inputs (and `k` advances twice
                # on that path) -- verify this is intentional.
                if res[0].error_no != 0:
                    num_run += 1
                    error_ct += 1
                    error_num += 1
                    input_run.append(inp)
                    res_run.append(res[0])
                else:
                    kr.append(k)
                    input_run.append(inp)
                    res_run.append(res[0])
                    num_run += 1
                    if self.control:
                        flag = True
                    flops = inp.task.flop/np.mean(res[0].costs)
                    config = inp.config
                    if flops > self.best_flops:
                        self.best_flops = flops
                        self.best_config = config
                        self.best_measure_pair = (inp, res)
                        self.best_iter = i+k
                        logger.debug("No: %d\tGFLOPS: %.2f/%.2f\tresult: %s\t%s",
                             i + k + 1, flops / 1e9, self.best_flops / 1e9,
                             res, config)
                k += 1
            # Count every ranked input (measured or predicted) as a trial.
            i += len(input_ranks)
            self.ttl = min(early_stopping + self.best_iter, n_trial) - i
            self.update(input_ranks, results)
            for callback in callbacks:
                callback(self, input_ranks, results)

            if i >= self.best_iter + early_stopping:
                logger.debug("Early stopped. Best iter: %d.", self.best_iter)
                break

            if error_ct > 150:
                logging.basicConfig()
                logger.warning("Too many errors happen in the tuning. Now is in debug mode")
                logger.setLevel(old_level)
            else:
                logger.setLevel(old_level)
        GLOBAL_SCOPE.in_tuning = False
