# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner that uses xgboost as cost model"""
import time
import math
import numpy as np
from ..env import GLOBAL_SCOPE
import logging
from ..measure import MeasureInput, create_measure_batch,MeasureResult
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer
import pickle
logger = logging.getLogger('autotvm')

class fastestTuner_v4(ModelBasedTuner):
    """Tuner that combines an xgboost cost model with an analytical
    operational-intensity (roofline-style) ranking model.

    Strategy: rank candidate configs with the analytical model and measure
    them from best to worst until one runs successfully (phase 1), then
    collect and measure further configs whose analytical score matches that
    best runnable config (phase 2).

    Parameters
    ----------
    task: Task
        The tuning task
    imax: float, optional
        Peak operational intensity of the target GPU
        (reference values: 24.10 GTX1060, 23.14 GTX1080Ti, 21.75 RTX2080Ti).
    blocks: int, optional
        Number of streaming multiprocessors of the target GPU
        (10 GTX1060, 28 GTX1080Ti, 68 RTX2080Ti, 80 V100).
    warp_size: int, optional
        Threads per warp assumed by the analytical model (64 used for V100 here).
    multiple_num: int or float, optional
        ``update`` switches to phase 2 after ``plan_size * multiple_num`` calls.
    plan_size: int
        The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
        and do planing for the next `plan_size` trials.
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
                                'itervar' is more accurate but 'knob' is much faster.
                                There are some constraints on 'itervar', if you meet
                                problems with feature extraction when using 'itervar',
                                you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
                               'itervar' and 'curve' has better transferability,
                               'knob' is faster.
        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
                     The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
                     The cost model predicts relative rank score.
    num_threads: int, optional
        The number of threads.
    optimizer: str or ModelOptimizer, optional
        If is 'sa', use a default simulated annealing optimizer.
        Otherwise it should be a ModelOptimizer object.
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick batch_size of them according to the diversity metric.
    log_interval: int, optional
        The verbose level.
        If is 0, output nothing.
        Otherwise, output debug information every `verbose` iterations.
    """

    def __init__(self, task, imax=1, blocks=80, warp_size=64, multiple_num=1,
                 plan_size=64 * 2, feature_type='itervar', loss_type='rank',
                 num_threads=8, optimizer='sa', diversity_filter_ratio=None,
                 log_interval=50):
        print("begin to run in tuner fastest v4")
        self.count = 0           # number of update() calls; gates the phase switch
        self.best_inp = []       # inputs that were measured successfully
        self.best_input = None   # best MeasureInput found so far
        self.flop_run = []       # achieved FLOPS for each entry of best_inp
        self.imax = imax         # device peak operational intensity
        self.blocks = blocks     # device SM count
        self.warp_size = warp_size  # threads per warp
        self.multiple_num = multiple_num
        cost_model = XGBoostCostModel(task,
                                      feature_type=feature_type,
                                      loss_type=loss_type,
                                      num_threads=num_threads,
                                      log_interval=log_interval // 2)
        if optimizer == 'sa':
            optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
        else:
            assert isinstance(optimizer, ModelOptimizer), "Optimizer must be " \
                                                          "a supported name string" \
                                                          "or a ModelOptimizer object."

        print('imax' + str(imax))
        print('blocks' + str(blocks))
        print('warp_size' + str(warp_size))
        print('multiple_num' + str(multiple_num))
        super(fastestTuner_v4, self).__init__(task, cost_model, optimizer,
                                              plan_size, diversity_filter_ratio)

    def _predicted_score(self, imax, grids):
        """Combine the analytical terms into one score in [0, 0.5].

        alpha: occupancy term -- penalizes launches with fewer than
               ``3 * blocks`` thread blocks.
        beta:  warp-utilization term, currently a fixed 0.5.
        gama:  operational-intensity term relative to the device peak.
        """
        if grids < self.blocks * 3:
            alpha = grids / (self.blocks * 3)
        else:
            alpha = 1
        beta = 0.5
        if imax > self.imax:
            gama = 1
        else:
            gama = imax / self.imax
        return alpha * beta * gama

    def _model_terms(self, inp):
        """Return the analytical terms ``(imax, grids, threads)`` for one input.

        Tile sizes are read from the config's json dict; tensor shapes come
        from the task arguments (assumes a conv2d-like argument layout with
        4-D data/weight shapes -- TODO confirm against the task definition).
        """
        d = inp.config.to_json_dict()
        data_shape = inp.task.args[0][1]
        data = data_shape[0] * data_shape[1] * data_shape[2] * data_shape[3]
        weight_shape = inp.task.args[1][1]
        weight = weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]

        fi = int(d["entity"][1][2])  # block_col_warps
        yi = int(d["entity"][0][2])  # block_row_warps
        ti = int(d["entity"][8][2])
        xi = self.warp_size
        rc = data_shape[3]
        ry = weight_shape[1]
        rx = weight_shape[2]
        warp_row_tiles = int(d["entity"][2][2])
        warp_col_tiles = int(d["entity"][3][2])
        by = 16 * warp_row_tiles * yi  # block_factor_n
        bx = 16 * warp_col_tiles * fi  # block_factor_o
        threads = rc * ry * rx
        grids = fi * yi * xi

        compute_op = fi * yi * xi * rc * ry * rx * 2
        pad_shared = data * weight_shape[1] / (fi * yi * xi * by * bx)
        kernel_shared = weight / (fi * yi * ti * xi)
        out_data = fi * yi * xi
        imax = compute_op / ((pad_shared + kernel_shared + out_data) * 4)
        return imax, grids, threads

    def rank(self, inputs):
        """Sort ``inputs`` by the analytical model, predicted-best first.

        Parameters
        ----------
        inputs: Array of autotvm.measure.MeasureInput
            The inputs for measurement.

        Returns
        -------
        (inputs_rank, flops_rank): the inputs and their scores, both in
        decreasing score order.
        """
        flops = []
        for inp in inputs:
            d = inp.config.to_json_dict()
            data_shape = inp.task.args[0][1]
            data = data_shape[0] * data_shape[1] * data_shape[2] * data_shape[3]
            weight_shape = inp.task.args[1][1]
            weight = weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]

            fi = int(d["entity"][1][2])  # block_col_warps
            yi = int(d["entity"][0][2])  # block_row_warps
            ti = int(d["entity"][8][2])
            xi = self.warp_size
            rc = data_shape[3]
            ry = weight_shape[1]
            rx = weight_shape[2]
            warp_row_tiles = int(d["entity"][2][2])
            warp_col_tiles = int(d["entity"][3][2])
            by = 16 * warp_row_tiles * yi  # block_factor_n
            bx = 16 * warp_col_tiles * fi  # block_factor_o
            grids = fi * yi * xi

            compute_op = fi * yi * xi * rc * ry * rx * 2
            # NOTE(review): this denominator uses ti while rank_score/score
            # use xi (self.warp_size) -- confirm which one is intended.
            pad_shared = data * weight_shape[1] / (fi * yi * ti * by * bx)
            kernel_shared = weight / (fi * yi * ti * xi)
            out_data = fi * yi * xi
            imax = compute_op / ((pad_shared + kernel_shared + out_data) * 4)

            flops.append(self._predicted_score(imax, grids))

        order = np.argsort(-np.array(flops))
        inputs_rank = [inputs[i] for i in order]
        flops_rank = [flops[i] for i in order]
        return inputs_rank, flops_rank

    def rank_score(self, inp):
        """Return the analytical score of a single MeasureInput.

        Bug fix: the original read ``inp.entity.index`` -- ``MeasureInput``
        has no ``entity`` attribute, so this raised AttributeError whenever
        called from ``update``. The value was unused and has been removed.
        """
        imax, grids, _ = self._model_terms(inp)
        return self._predicted_score(imax, grids)

    def score(self, inp):
        """Return the three analytical components ``[imax, grids, threads]``
        used for exact-match comparison of configs in ``update``."""
        imax, grids, threads = self._model_terms(inp)
        return [imax, grids, threads]

    def next_batch(self, batch_size):
        """Pick up to ``batch_size`` unvisited configs from the search space.

        Configs come from ``self.trials`` while available; an empty trial
        list or the last 5% of a plan falls back to epsilon-greedy random
        picks. NOTE(review): picked indices are not added to ``visited``
        here (that happens after measuring in ``tune``), so a batch may
        contain duplicates.
        """
        ret = []
        counter = 0
        while counter < batch_size:
            if len(self.visited) >= len(self.space):
                break  # the whole search space has been visited

            while self.trial_pt < len(self.trials):
                index = self.trials[self.trial_pt]
                if index not in self.visited:
                    break
                self.trial_pt += 1

            if self.trial_pt >= len(self.trials) - int(0.05 * self.plan_size):
                # if the trial list is empty or
                # the tuner is doing the last 5% trials (e-greedy), choose randomly
                index = np.random.randint(len(self.space))
                while index in self.visited:
                    index = np.random.randint(len(self.space))

            ret.append(self.space.get(index))
            counter += 1
        return ret

    def update(self, inputs, results):
        """After enough measurement rounds, build the phase-2 trial list.

        A candidate config is kept when its ``score`` triple exactly matches
        that of the best runnable config, or (wider second-stage filter)
        when its ``rank_score`` is within 0.01 of it.
        """
        self.count += 1
        if self.count < self.plan_size * self.multiple_num:
            return
        if not self.best_inp:
            # nothing ran successfully yet -- no reference config to match
            return

        # reference = best runnable input so far (highest achieved FLOPS)
        order = np.argsort(-np.array(self.flop_run))
        best_run = self.best_inp[order[0]]
        best_score = self.score(best_run)       # loop-invariant: hoisted
        best_rank = self.rank_score(best_run)   # loop-invariant: hoisted

        maximums = []
        while len(maximums) < self.plan_size:
            new_configs = self.next_batch(1000)
            if not new_configs:
                break  # search space exhausted; avoid spinning forever
            for config in new_configs:
                tmp_input = MeasureInput(self.task.target, self.task, config)
                if self.score(tmp_input) == best_score:
                    maximums.append(config)
                # second stage: widen the net with near-equal rank scores
                elif abs(self.rank_score(tmp_input) - best_rank) <= 0.01:
                    maximums.append(config)
        print('use model_optimizer to find')
        self.trials = maximums

    def tune(self, n_trial, measure_option, early_stopping=None, callbacks=()):
        """Begin tuning

        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value of autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        n_parallel = getattr(measure_batch, 'n_parallel', 1)
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping

        old_level = logger.level

        GLOBAL_SCOPE.in_tuning = True
        i = error_ct = 0

        print("begin to tuner in tuner fastest v4")

        # Phase 1: rank candidates analytically, measure from best to worst
        # until a runnable config is found, then let update() plan phase 2.
        while i < n_trial:
            if not self.has_next():
                break

            configs = self.next_batch(1000)
            inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]

            # predicted-best inputs come first
            input_ranks, flops_rank = self.rank(inputs)
            k = 0            # position within input_ranks
            input_run = []   # inputs measured in this round
            res_run = []     # their measurement results
            flag = False     # set once a config runs; skip the remainder
            for inp in input_ranks:
                if flag:
                    k += 1
                    continue
                res = measure_batch([inp, ])
                self.visited.add(inp.config.index)
                # NOTE(review): hard-coded log path -- consider making it configurable
                with open('/home/sxh/tvmfile/tuning_process_result.txt', 'a', encoding='utf-8') as file:
                    file.write(str(inp) + ',' + str(res) + '\n')
                if res[0].error_no != 0:
                    error_ct += 1
                    input_run.append(inp)
                    res_run.append(res[0])
                else:
                    print('error_no is 0')
                    input_run.append(inp)
                    res_run.append(res[0])
                    flag = True
                    flops = inp.task.flop / np.mean(res[0].costs)
                    config = inp.config
                    self.best_inp.append(inp)
                    self.flop_run.append(flops)
                    if flops > self.best_flops:
                        self.best_flops = flops
                        self.best_config = config
                        self.best_measure_pair = (inp, res)
                        self.best_iter = i + k
                        logger.debug("No: %d\tGFLOPS: %.2f/%.2f\tresult: %s\t%s",
                                     i + k + 1, flops / 1e9, self.best_flops / 1e9,
                                     res, config)
                k += 1
            i += len(res_run)
            self.ttl = min(early_stopping + self.best_iter, n_trial) - i
            self.update(input_run, res_run)

            if len(self.trials) > 0:
                # phase-2 candidates were found; leave phase 1
                break

            for callback in callbacks:
                callback(self, input_run, res_run)

            if i >= self.best_iter + early_stopping:
                logger.debug("Early stopped. Best iter: %d.", self.best_iter)
                # bug fix: the original passed the format string and the value
                # as two separate print() arguments, printing "%d" literally
                print("Early stopped. Best iter: %d." % self.best_iter)
                # early stopping still proceeds to phase 2
                break

            if error_ct > 150:
                logging.basicConfig()
                logger.warning("Too many errors happen in the tuning. Now is in debug mode")
            logger.setLevel(old_level)

        # Phase 2: measure every candidate kept by update()
        print('进入第二阶段')
        configs = self.trials
        inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]
        input_run = []
        res_run = []
        # best measurement pair from phase 1 (None when nothing ran; the
        # original crashed here on an empty phase 1)
        self.best_input = self.best_measure_pair[0] if self.best_measure_pair else None
        for inp in inputs:
            res = measure_batch([inp, ])
            self.visited.add(inp.config.index)
            input_run.append(inp)
            res_run.append(res[0])
            if res[0].error_no == 0:
                flops = inp.task.flop / np.mean(res[0].costs)
            else:
                flops = 0

            if flops > self.best_flops:
                self.best_input = inp
                self.best_flops = flops

            for callback in callbacks:
                callback(self, [inp, ], res)

        GLOBAL_SCOPE.in_tuning = False