'''
Version: 2.0
Author: Yue Zhong
Date: 2024-11-07 16:11:10
Description: 
LastEditors: Yue Zhong
LastEditTime: 2025-01-06 21:58:11
'''
import ast
import logging
import time
from copy import deepcopy

import numpy as np
import torch

from spikingjelly.clock_driven import functional


import utils

from LLMconfig.utils import *
from LLMconfig.config import *
from LLMconfig.database import *

from blocks import PRIMITIVES
from space import CANDIDATE_BLOCKS, MACRO_SEARCH_SPACE

load_env()
client = getllm("suggest")

class DynamicPrompt(object):
    """Build and incrementally update the LLM prompt for one search task.

    The prompt is assembled from a stored template plus task-specific
    sections (requirements, knowledge, constraints, search space, output
    example).  Evaluation logs are injected either dynamically through a
    few-shot selector (``use_fewshot=True``) or with plain ``str.format``.
    """

    def __init__(self, task_id: int, constraints_dict: Dict = None, space_dict: Dict = None,
                 prompt_id: int = 0, use_fewshot=True):
        self.task_id = task_id
        self.task = Task(task_path, task_id)

        self.prompt_id = prompt_id
        self.promptbase = Prompt(prompt_path, prompt_id)
        self.prompt_dict = self.promptbase.get_prompt_dict()
        self.prompt_template_str = self.promptbase.get_prompt_template_str()

        # Controls how training logs are rendered into the prompt
        # (dynamic few-shot selection vs. plain template formatting).
        self.use_fewshot = use_fewshot
        self.combine_input_vars = False

        self.data_prepare(constraints_dict, space_dict)

    def init_config_dict(self, total_invars: List) -> Dict:
        """Return a dict mapping every input-variable name to an empty string."""
        assert not isempty(total_invars)
        return {k: "" for k in total_invars}

    def get_total_invars(self) -> List[str]:
        """Collect the names of all template input variables.

        Returns a fresh list.  BUGFIX: the previous implementation aliased
        ``self.prompt_dict['input_vars']`` directly, so the ``extend()``
        below and the later ``remove('EvalLogs')`` in :meth:`data_prepare`
        mutated the shared prompt definition in place.
        """
        input_vars: List[str] = list(self.prompt_dict['input_vars'])
        if self.combine_input_vars:
            space_input_vars = self.task.space.get_total_invars()
            if not isempty(space_input_vars):
                input_vars.extend(space_input_vars)
        return input_vars

    def data_prepare(self, constraints_dict: Dict = None, space_dict: Dict = None):
        """Prepare the prompt data: fill config_dict and derive prefix/suffix.

        Args:
            constraints_dict (Dict, optional): values for the constraint
                placeholders. Defaults to None.
            space_dict (Dict, optional): values for the search-space
                placeholders. Defaults to None.
        """
        # Expected template variables:
        # ["Requirements", "Knowledges", "Constraints", "EvalLogs",
        #  "output_example", "SearchSpace"]
        self.input_vars = self.get_total_invars()
        self.config_dict = self.init_config_dict(self.input_vars)
        config_dict = {
               "Requirements": self.task.generate_requirement(),
               "Knowledges": "\n".join(self.task.knowledge),
               "Constraints": self.task.constraints.get_complete_str(constraints_dict),
               "SearchSpace": self.task.space.get_complete_description(space_dict),
               "output_example": self.task.space.get_example_str(),
        }
        self.config_dict.update(config_dict)
        if self.combine_input_vars and not isempty(space_dict):
            self.config_dict.update(space_dict)

        if self.use_fewshot:
            # Dynamic prompt: a few-shot selector fills in {EvalLogs}, so
            # the prompt is no longer a plain str and 'EvalLogs' must leave
            # the formatted-variable list.
            self.prefix, self.suffix = self.promptbase.get_suffix_prefix_by_name(r'{EvalLogs}')
            self.input_vars.remove('EvalLogs')
        else:
            # Static prompt: a hand-designed str template, rendered later
            # via str.format().
            self.suffix = None
            self.prefix = None

    def get_init_prompt(self) -> str:
        """Render the initial prompt, before/with the first evaluation logs."""
        if self.task.evallog.isempty():
            evallog_dict = {'EvalLogs': ""}
            self.config_dict.update(evallog_dict)
            return self.prompt_template_str.format(**self.config_dict)
        elif self.use_fewshot:
            return self._generate_prompt_dynamic()
        else:
            # Token limits are ignored here, so a random example selector is fine.
            evloglist_random = random_example_selector(self.task.evallog.evallog)
            eval_dict = {'EvalLogs': "\n".join(evloglist_random)}
            self.config_dict.update(eval_dict)
            # BUGFIX: format() was previously called without arguments, which
            # raises KeyError for every placeholder in the template; pass the
            # config dict exactly as update_prompt() does.
            return self.prompt_template_str.format(**self.config_dict)

    def _generate_prompt_dynamic(self):
        # Refresh the variable bindings first, then build the few-shot prompt.
        config_dict = {k: self.config_dict[k] for k in self.input_vars}
        return generate_dynamic_prompt(self.task.evallog.evallog, self.prefix, self.suffix, config_dict)

    def _add_one_log(self, config: Dict, metrics: Dict) -> None:
        """Append one (config, metrics) record to the task's evaluation log."""
        # Annotation fixed: this method returns None, not str.
        self.task.evallog.add_log(len(self.task.evallog), config, metrics)

    def update_prompt(self, config, metrics):
        """Append a new training-log entry and regenerate the prompt."""
        self._add_one_log(config, metrics)
        if self.use_fewshot:
            return self._generate_prompt_dynamic()
        else:
            # Token limits are ignored here, so a random example selector is fine.
            evloglist_random = random_example_selector(self.task.evallog.evallog)
            eval_dict = {'EvalLogs': "\n".join(evloglist_random)}
            self.config_dict.update(eval_dict)
            return self.prompt_template_str.format(**self.config_dict)
    
class LLMSearch():
    """Drive an LLM-guided architecture search over SNN block configurations.

    Each iteration asks the LLM for a candidate architecture (``block_ids``),
    evaluates it on the validation loader, and feeds the resulting metrics
    back into the prompt for the next round.
    """

    def __init__(self, args, net):
        self.args = args
        self.net = net
        self.task_id = args.task_id

        # Cache of already-evaluated configs: "12345" -> metrics dict.
        self.seen_configs = {}
        # History of every block_ids proposal made by the LLM.
        self.trace = []

        self.fitness_lambda = args.fitness_lambda
        self.spikes_dominator = args.avg_num_spikes
        constraints_dict = {'train_avg_spiking_numbers': self.spikes_dominator, 'lambda': self.fitness_lambda}

        candidate = self.get_candidate_description(CANDIDATE_BLOCKS)
        search_space = self.get_search_space(args.search_space)
        space_dict = {
            "snn_params": represent_dict(net.snn_params),
            "num_class": str(args.num_class),
            "search_space": search_space,
            "dataset_name": args.dataset_name,
            "operators": candidate
        }

        self.dynamic = DynamicPrompt(self.task_id, constraints_dict, space_dict)

    @staticmethod
    def get_candidate_description(candidate_list: List[str]) -> str:
        """Render the candidate blocks as the string "{id: primitive_desc, ...}"."""
        # Comprehension replaces the loop and avoids shadowing the builtin `id`.
        candidate_dict = {idx: get_primitive_string(cand, PRIMITIVES)
                          for idx, cand in enumerate(candidate_list)}
        return str(candidate_dict)

    @staticmethod
    def get_search_space(search_space: str) -> str:
        """Return the textual description of the named macro search space."""
        search_space_dict = MACRO_SEARCH_SPACE[search_space]
        return represent_dict(search_space_dict)

    def search(self, max_search_iter, valid_loader, train_loader):
        """Run the LLM-driven search loop for ``max_search_iter`` iterations."""
        best_config = []
        best_metrics = {}
        score = np.float32('-inf')
        logging.info("\nStart searching...\n")
        prompt = self.generate_init_prompt()
        for it in range(max_search_iter):
            logging.info(f"The current step is: {it}")
            t0 = time.perf_counter()
            config_str = responsellm(client=client, prompt=prompt)

            logging.info(f"The current response is: \n{config_str}\n\n")
            config_dict = self.parse_config(config_str)
            block_ids = config_dict.get('block_ids', None)
            if isinstance(block_ids, str):
                # SECURITY FIX: the LLM response is untrusted input, so parse
                # the literal with ast.literal_eval instead of eval().
                block_ids = ast.literal_eval(block_ids)
            assert not isempty(block_ids), "block_ids不能为空"
            assert (len(block_ids) == 5), f"block_ids:{block_ids}的长度必须为5"
            self.trace.append(block_ids)
            logging.info(f"The current config is: {block_ids}")

            metrics_dict = self.evaluate(valid_loader, block_ids)
            prompt = self.update_prompt(config_dict, metrics_dict)

            if score < metrics_dict['fitness']:
                score = metrics_dict['fitness']
                best_config = block_ids
                best_metrics = metrics_dict
            t1 = time.perf_counter()
            logging.info(f"The current {it} step time is: {self._round(t1-t0)} s")

        # Print the final search summary and the accumulated eval log.
        logging.info(f"The best config is: {best_config}")
        logging.info(f"The best metrics is: {best_metrics}")
        logging.info("End searching!\n")
        logging.info(f"The last prompt is: \n{prompt}\n\n")
        logging.info(f"The eval log is: \n{str(self.dynamic.task.evallog)}")

    def generate_init_prompt(self):
        """Build the very first prompt (no evaluation logs yet)."""
        return self.dynamic.get_init_prompt()

    def parse_config(self, config_str: str) -> Dict:
        """Extract the block_ids configuration from the raw LLM answer."""
        # ResponseSchema tells the structured-output parser what to look for.
        # (Typo fix: "lenght" -> "length" in the instruction sent to the LLM.)
        result_response_schema = [
            ResponseSchema (
            name = "block_ids", 
            description = "The configuration of the five TBDs layers and is a separated Python List[int] with length=5, block_ids=[block_id1, block_id2, ...].\
                    If this information is not found, output [].",
            type = "List[int]"
            )
        ]
        config_dict = config2json(client, config_str, result_response_schema)
        return config_dict

    def update_prompt(self, config_dict: Dict, metrics_dict: Dict):
        """Feed the latest (config, metrics) pair back into the prompt."""
        prompt = self.dynamic.update_prompt(config_dict, metrics_dict)
        return prompt

    def evaluate(self, loader, block_ids: List[int]) -> Dict:
        """Evaluate a config, reusing cached metrics for repeated configs.

        Returns:
            Dict with keys 'accuracy', 'spikes_numbers' and 'fitness'.
        """
        temp = ''.join([str(i) for i in block_ids])
        if temp in self.seen_configs:
            metrics = self.seen_configs[temp]
            logging.info(f"The config has happened, and the evaluation metrics is {metrics}")
        else:
            complete_block_ids = self.decode_arch(block_ids)
            acc, spikes = self.infer(loader, self.net, self.args, complete_block_ids)
            # Fitness trades accuracy off against the spike count (energy proxy).
            fitness = acc * pow(spikes / self.spikes_dominator, self.fitness_lambda)
            metrics = {'accuracy': self._round(acc), 'spikes_numbers': self._round(spikes), 'fitness': self._round(fitness)}

            self.seen_configs[temp] = metrics
            logging.info(f"The config has been evaluated, and the evaluation metrics is {metrics}")
        return metrics

    def _round(self, x) -> float:
        """Round metric values to DIGIT decimal places; ints pass through."""
        if isinstance(x, torch.Tensor):
            return round(x.item(), DIGIT)
        elif isinstance(x, int):
            return x
        else:
            return round(x, DIGIT)

    def decode_arch(self, block_ids: List[int]) -> List[int]:
        """Expand the searched TBD-layer ids into the complete architecture.

        -1 placeholders mark the fixed (non-searched) layers.
        """
        complete_arch = deepcopy(block_ids)
        complete_arch.insert(1, -1)
        complete_arch.insert(-2, -1)
        complete_arch.append(-1)
        return complete_arch

    def infer(self, loader, net, args, block_ids=None):
        """Run validation inference for the given architecture.

        Returns:
            (accuracy, average spike count). For ANN neurons accuracy comes
            from utils.accuracy; for SNNs it is computed from spike counters.
        """
        assert (block_ids is not None)
        top1 = utils.AverageMeter()
        num_spikes = utils.AverageMeter()
        net.eval()

        total_correct = 0
        total_num = 0
        with torch.no_grad():
            for step, (input, target) in enumerate(loader):
                input = input.cuda()
                target = target.cuda()

                if args.neuron == 'ANN':
                    logits, _ = net(input, block_ids)
                    acc, _ = utils.accuracy(logits, target, topk=(1, 5))
                    num_of_spikes = 0
                else:
                    out_spikes_counter, num_of_spikes, _ = net(input, block_ids)
                    # Reset membrane potentials between batches.
                    functional.reset_net(net)
                    # NOTE(review): here `acc` is a raw correct count (not a
                    # rate), so top1 accumulates counts; only the returned
                    # total_correct/total_num ratio is used for SNNs.
                    acc = (out_spikes_counter.argmax(dim=1) == target).float().sum()
                    total_correct += acc

                n = input.size(0)
                total_num += n
                top1.update(acc.item(), n)
                num_spikes.update(num_of_spikes, n)

        if args.neuron == 'ANN':
            return top1.avg, num_spikes.avg
        else:
            return total_correct / total_num, num_spikes.avg

if __name__ == '__main__':
    
    # No standalone entry point: this module is used via imports elsewhere.
    pass
