"""Tuner that uses xgboost as cost model"""
import time
import numpy as np

from tvm.autotvm.task.space import ConfigEntity
from ..env import GLOBAL_SCOPE
import logging
from ..measure import MeasureInput, create_measure_batch,MeasureResult
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer
import math
import random
logger = logging.getLogger('autotvm')

class raftXGBTuner(ModelBasedTuner):
    """Tuner that uses xgboost as cost model, augmented with an analytical
    "raft" (arithmetic-intensity) score used to pre-rank candidate configs.

    Tuning runs in two stages (see :meth:`tune`):

    1. Randomly sample a large batch of configs, rank them by the analytical
       raft score, and measure the highest-scoring ones on real hardware.
    2. Train the xgboost cost model on the stage-1 measurements, then only
       measure model-proposed configs whose raft score matched one of the
       best stage-1 scores.

    Parameters
    ----------
    task: Task
        The tuning task
    plan_size: int
        The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
        and do planning for the next `plan_size` trials.
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
                                'itervar' is more accurate but 'knob' is much faster.
                                There are some constraints on 'itervar', if you meet
                                problems with feature extraction when using 'itervar',
                                you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
                               'itervar' and 'curve' has better transferability,
                               'knob' is faster.
        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
                     The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
                     The cost model predicts relative rank score.
    num_threads: int, optional
        The number of threads.
    optimizer: str or ModelOptimizer, optional
        If is 'sa', use a default simulated annealing optimizer.
        Otherwise it should be a ModelOptimizer object.
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick batch_size of them according to the diversity metric.
    log_interval: int, optional
        The verbose level.
        If is 0, output nothing.
        Otherwise, output debug information every `verbose` iterations.
    """
    def __init__(self,
        task,
        plan_size=64,
        feature_type="itervar",
        loss_type="rank",
        num_threads=None,
        optimizer="sa",
        diversity_filter_ratio=None,
        log_interval=50,):

        cost_model = XGBoostCostModel(
            task,
            feature_type=feature_type,
            loss_type=loss_type,
            num_threads=num_threads,
            log_interval=log_interval // 2,
        )
        if optimizer == "sa":
            optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
        else:
            assert isinstance(optimizer, ModelOptimizer), (
                "Optimizer must be " "a supported name string" "or a ModelOptimizer object."
            )

        super(raftXGBTuner, self).__init__(
            task, cost_model, optimizer, plan_size, diversity_filter_ratio)

        # State for uniform random sampling without replacement over the
        # config space (Fisher-Yates style indirect-index map).
        self.rand_state = {}     # indirect index -> direct index remapping
        self.visited = []        # direct indices already handed out
        self.rand_max = len(self.task.config_space)  # remaining unsampled count

    def get_raft_score(self, inp):
        """Compute the analytical "raft" score of one measurement input.

        Higher scores are predicted to perform better; candidates are sorted
        by this score before measuring, best first.

        Parameters
        ----------
        inp: autotvm.measure.MeasureInput
            The input for measurement.

        Returns
        -------
        score: float
            imax * alpha * beta, where imax is an operations-per-byte
            estimate, alpha penalizes low grid occupancy (< 320 blocks), and
            beta favors 1-8 warps per block.

        Notes
        -----
        NOTE(review): this assumes a conv2d-like task layout — args[0]/args[1]
        are 4-D data/weight shapes and args[2]/args[3] carry stride/padding,
        and a config entity with tile_f/y/x, tile_rc/ry/rx, unroll knobs in
        that exact order — confirm against the tuned op before reuse.
        """
        index = inp.config.index
        d = inp.config.to_json_dict()
        data_shape = inp.task.args[0][1]
        data = data_shape[0] * data_shape[1] * data_shape[2] * data_shape[3]
        weight_shape = inp.task.args[1][1]
        weight = weight_shape[0] * weight_shape[1] * weight_shape[2] * weight_shape[3]
        stride = inp.task.args[2][0]
        padding = inp.task.args[3][0]
        # Output spatial extent of the convolution.
        y = (data_shape[2] - weight_shape[2] + 2 * padding) // stride + 1
        # Each tile knob stores [outer, *inner]; recompute the outer factor
        # from the full extent divided by the product of the inner factors.
        tile_f = d["entity"][0][2]
        tile_f[0] = weight_shape[0] // (tile_f[1] * tile_f[2] * tile_f[3])
        tile_y = d["entity"][1][2]
        tile_y[0] = y // (tile_y[1] * tile_y[2] * tile_y[3])
        tile_x = d["entity"][2][2]
        tile_x[0] = y // (tile_x[1] * tile_x[2] * tile_x[3])
        tile_rc = d["entity"][3][2]
        tile_rc[0] = data_shape[1] // tile_rc[1]
        tile_ry = d["entity"][4][2]
        tile_ry[0] = weight_shape[2] // tile_ry[1]
        tile_rx = d["entity"][5][2]
        tile_rx[0] = weight_shape[2] // tile_rx[1]
        warps = tile_f[2] * tile_y[2] * tile_x[2] // 32
        threads = tile_f[2] * tile_y[2] * tile_x[2]
        grids = tile_f[0] * tile_y[0] * tile_x[0]
        # Multiply-accumulate work per thread (x2 for mul+add).
        compute_op = tile_f[3] * tile_y[3] * tile_x[3] * tile_rc[1] * tile_ry[1] * tile_rx[1] * tile_f[1] * tile_y[1] * tile_x[1] * 2
        # Estimated shared-memory loads per thread for the input and kernel.
        pad_shared = data / (tile_f[2] * tile_y[2] * tile_x[2] * tile_y[0] * tile_x[0] * tile_rc[0] * tile_ry[0] * tile_rx[0])
        if pad_shared < 1:
            pad_shared = 1
        else:
            pad_shared = math.ceil(pad_shared) + 1
        kernel_shared = weight / (tile_f[2] * tile_y[2] * tile_x[2] * tile_f[0] * tile_rc[0] * tile_ry[0] * tile_rx[0])
        kernel_shared = math.ceil(kernel_shared)
        # Shared memory footprint in KiB (4 bytes per element); computed but
        # currently unused in the score.
        shared = (pad_shared + kernel_shared) * 4 * tile_rc[0] * tile_ry[0] * tile_rx[0] * tile_f[2] * tile_y[2] * tile_x[2] / 1024
        # Arithmetic intensity: compute ops per byte loaded (4-byte elements).
        imax = compute_op / ((pad_shared + kernel_shared) * 4)
        auto_unroll = d["entity"][6][2]
        unroll_explicit = d["entity"][7][2]

        # alpha: saturates at 1 once the kernel launches >= 320 blocks.
        alpha = 0
        if grids >= 320:
            alpha = 1
        else:
            alpha = grids / 320

        # beta: peaks at 1 for 8 warps/block, ramps up from 1 and decays
        # beyond 8; zero warps (blocks smaller than a warp) score 0.
        beta = 0
        if warps == 0:
            beta = 0
        elif warps <= 8 and warps >= 1:
            beta = (warps - 1) / 7
        elif warps > 8:
            beta = 1 / (warps - 7)
        score = imax * alpha * beta

        return score

    def random_next_batch(self, batch_size):
        """Uniformly sample up to `batch_size` unvisited configs.

        Uses an indirect-index map (`self.rand_state`) so every config in the
        space is drawn at most once across calls, without materializing a
        shuffled copy of the whole space.

        Parameters
        ----------
        batch_size: int
            Maximum number of configs to return; fewer are returned when the
            space is nearly exhausted.

        Returns
        -------
        ret: List of ConfigEntity
        """
        ret = []
        print("样本：", self.rand_max)
        for _ in range(batch_size):
            if self.rand_max == 0:
                break

            # Random an indirect index.
            index_ = np.random.randint(self.rand_max)

            self.rand_max -= 1

            # Use the indirect index to get a direct index.
            index = self.rand_state.get(index_, index_)
            ret.append(self.space.get(index))
            self.visited.append(index)

            # Update the direct index map: move the (still unseen) last slot
            # into the slot just consumed.
            self.rand_state[index_] = self.rand_state.get(self.rand_max, self.rand_max)
            self.rand_state.pop(self.rand_max, None)
        return ret

    def tune(self, n_trial, measure_option, early_stopping=None, callbacks=()):
        """Begin tuning.

        Stage 1: random-sample large batches, rank by raft score, measure the
        top candidates, and record the raft scores of the 10 best-performing
        measurements. Stage 2: retrain the cost model and only measure
        model-proposed configs whose raft score matched a stage-1 best score.

        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value of autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials.
            NOTE(review): currently stored but not enforced; stage 2 uses its
            own fixed early-stop window of 50 productive measurements.
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping
        self.best_score_list = []

        GLOBAL_SCOPE.in_tuning = True
        error_ct = 0
        cur = 0
        # BUGFIX: bind these before the stage-1 loop so the model update below
        # cannot raise NameError when the loop body never executes
        # (n_trial <= 0 or an exhausted search space).
        measured_inps = []
        results = []
        try:
            # ---- Stage 1: random sampling ranked by analytical raft score ----
            while cur < n_trial:
                if not self.has_next():
                    break

                configs = self.random_next_batch(10000)
                inputs = []
                rafts = []
                for config in configs:
                    minput = MeasureInput(self.task.target, self.task, config)
                    inputs.append(minput)
                    rafts.append(self.get_raft_score(minput))

                # Sort candidates best-score-first.
                inputs_with_raft = list(zip(inputs, rafts))
                raft_index_list = sorted(inputs_with_raft, key=lambda x: x[1], reverse=True)

                # Deduplicate consecutive identical scores: measure only one
                # representative per distinct raft value.
                to_measure_inprafts = []
                last = -1
                for inpraft in raft_index_list:
                    if inpraft[1] != last:
                        to_measure_inprafts.append(inpraft)
                    last = inpraft[1]

                # Only this batch's measurements feed the stage-2 model update.
                results = []
                measured_inps = []
                print("测量的raft个数")
                print(len(to_measure_inprafts))
                k = 0  # count of successful measurements in this batch
                for inpraft in to_measure_inprafts:
                    inp, raft = inpraft
                    res = measure_batch([inp, ])

                    if res[0].error_no != 0:
                        error_ct += 1
                    else:
                        k += 1
                        measured_inps.append(inp)
                        results.append(res[0])
                        flops = inp.task.flop / np.mean(res[0].costs)
                        config = inp.config
                        if flops > self.best_flops:
                            self.best_flops = flops
                            self.best_config = config
                            self.best_measure_pair = (inp, res)
                            self.best_iter = cur + k
                            # BUGFIX: log the same trial number the print below
                            # reports (was `cur + 1`).
                            logger.debug("No: %d\tGFLOPS: %.2f/%.2f\traft:%f\tresult: %s\t%s", cur + k, flops / 1e9, self.best_flops / 1e9, raft, res, config)
                        print("No:", cur + k, flops / 1e9, self.best_flops / 1e9, "raft:", raft, "time:", res)
                        for callback in callbacks:
                            callback(self, measured_inps, results)

                        # Stop this batch after 100 productive measurements
                        # with no new best.
                        if k > self.best_iter + 100:
                            print("一阶段结束")
                            break
                cur += len(results)

                # Report the raft scores of the 10 fastest measurements;
                # stage 2 keeps only candidates hitting one of these scores.
                inps_with_res = list(zip(measured_inps, results))
                inps_with_res_sorted = sorted(inps_with_res, key=lambda x: x[1])
                print("最好的10个raft分数以及性能：")
                for i, pair in enumerate(inps_with_res_sorted[:10]):
                    raft = self.get_raft_score(pair[0])
                    self.best_score_list.append(raft)
                    print("性能rank:", i, "raft:", raft, "性能：", pair[1][0])

            # ---- Stage 2: cost-model-guided search filtered by raft score ----
            num = 150        # stage-2 measurement budget
            early_stop = 50  # stop after this many trials without a new best
            best_num = -1    # trial index of the last improvement (-1: none yet)
            # Train the cost model on the stage-1 measurements.
            self.update(measured_inps, results)
            k = 0
            while k < num:
                # Candidates proposed by the xgboost model optimizer.
                configs = self.next_batch(10000)
                inputs = []
                rafts = []
                for config in configs:
                    minput = MeasureInput(self.task.target, self.task, config)
                    raft = self.get_raft_score(minput)
                    if raft in self.best_score_list:
                        inputs.append(minput)
                        rafts.append(raft)
                # NOTE(review): if no proposal ever matches a best score this
                # loop never increments k — confirm termination for the target
                # workload.
                print("二阶段候选个数：")
                print(len(inputs))
                inputs_with_raft = list(zip(inputs, rafts))

                for inpraft in inputs_with_raft:
                    if k > num:
                        break
                    if k > best_num + early_stop and best_num != -1:
                        print("提前终止迭代")
                        return

                    inp, raft = inpraft
                    res = measure_batch([inp, ])

                    if res[0].error_no != 0:
                        error_ct += 1
                    else:
                        k += 1
                        measured_inps.append(inp)
                        results.append(res[0])
                        flops = inp.task.flop / np.mean(res[0].costs)
                        config = inp.config
                        if flops > self.best_flops:
                            self.best_flops = flops
                            self.best_config = config
                            self.best_measure_pair = (inp, res)
                            self.best_iter = k
                            best_num = k
                            # BUGFIX: log the same trial number the print below
                            # reports (was `cur + 1`).
                            logger.debug("No: %d\tGFLOPS: %.2f/%.2f\traft:%f\tresult: %s\t%s", cur + k, flops / 1e9, self.best_flops / 1e9, raft, res, config)
                        print("No:", cur + k, flops / 1e9, self.best_flops / 1e9, "raft:", raft, "time:", res)
                        for callback in callbacks:
                            callback(self, measured_inps, results)
                # Refit the model with the accumulated measurements.
                self.update(measured_inps, results)
        finally:
            # BUGFIX: always clear the global tuning flag, including on the
            # stage-2 early `return` above (previously left stuck at True).
            GLOBAL_SCOPE.in_tuning = False