{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Namespace(save='EXP', arch_sampling='uniform', supernet='./macro_search_result2/uniform_sampling/AutoSNN_64_CIFAR10_SNN_Adam_600ep_2022/batch_size_96/checkpoint.pth.tar', search_algo='LLM', num_return_archs=1, dataset_dir='./data', dataset_name='CIFAR10', gpu=0, seed=2022, search_seed=99, train_portion=0.8, search_space='AutoSNN_64', batch_size=96, T=8, init_tau=2.0, v_threshold=1.0, neuron='PLIF', split_by='number', normalization=None, cutout=False, cutout_length=16, max_search_iter=5, fitness='ACC_pow_spikes', fitness_lambda=-0.08, avg_num_spikes=None, task_id=1)\n"
     ]
    }
   ],
   "source": [
    "import argparse\n",
    "\n",
    "# 创建一个字典来模拟命令行参数\n",
    "args_dict = {\n",
    "    'save': 'EXP',\n",
    "    'arch_sampling': 'uniform',\n",
    "    'supernet': './macro_search_result2/uniform_sampling/AutoSNN_64_CIFAR10_SNN_Adam_600ep_2022/batch_size_96/checkpoint.pth.tar',\n",
    "    'search_algo': 'LLM',\n",
    "    'num_return_archs': 1,\n",
    "    'dataset_dir': './data',\n",
    "    'dataset_name': 'CIFAR10',\n",
    "    'gpu': 0,\n",
    "    'seed': 2022,\n",
    "    'search_seed': 99,\n",
    "    'train_portion': 0.8,\n",
    "    'search_space': 'AutoSNN_64',\n",
    "    'batch_size': 96,\n",
    "    'T': 8,\n",
    "    'init_tau': 2.0,\n",
    "    'v_threshold': 1.0,\n",
    "    'neuron': 'PLIF',\n",
    "    'split_by': 'number',\n",
    "    'normalization': None,\n",
    "    'cutout': False,\n",
    "    'cutout_length': 16,\n",
    "    'max_search_iter': 5,\n",
    "\n",
    "    'fitness': 'ACC_pow_spikes',\n",
    "    'fitness_lambda': -0.08,\n",
    "    'avg_num_spikes': None,\n",
    "    'task_id': 1\n",
    "}\n",
    "\n",
    "# 将字典转换为 argparse.Namespace 对象\n",
    "args = argparse.Namespace(**args_dict)\n",
    "\n",
    "# 打印 args 以验证\n",
    "print(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "if './search_arch' not in sys.path:\n",
    "    sys.path.insert(0, './search_arch')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from search_arch.LLMconfig.utils import get_primitive_string\n",
    "# from blocks import PRIMITIVES\n",
    "# for k in PRIMITIVES.keys():\n",
    "#     prstring = get_primitive_string(k,PRIMITIVES)\n",
    "#     print(prstring)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "if True:\n",
    "    if args.supernet is None:\n",
    "        raise ValueError('A trained supernet is required.')\n",
    "    dataset_dir = args.dataset_dir\n",
    "    dataset_name = args.dataset_name\n",
    "\n",
    "    T = args.T\n",
    "    init_tau = args.init_tau\n",
    "    v_threshold = args.v_threshold\n",
    "    neuron = args.neuron\n",
    "\n",
    "    split_by = args.split_by\n",
    "    normalization = args.normalization\n",
    "    if normalization == 'None':\n",
    "        normalization = None\n",
    "\n",
    "    args.save = args.supernet.replace('checkpoint.pth.tar', '')\n",
    "    tmp_neuron = 'ANN' if args.neuron == 'ANN' else 'SNN'\n",
    "    if args.search_algo == 'just_sampling':\n",
    "        search_name = f'{args.search_space}_{args.dataset_name}_{tmp_neuron}_{args.search_algo}_{args.seed}'\n",
    "    else:\n",
    "        search_name = f'{args.search_space}_{args.dataset_name}_{tmp_neuron}_{args.seed}_{args.search_algo}_{args.fitness}_{args.fitness_lambda}_{args.search_seed}'\n",
    "        arch_prefix = f'{args.search_space}_{args.dataset_name}_{tmp_neuron}_{args.seed}_{args.search_algo}_{args.fitness}_{-1 * int(args.fitness_lambda * 100)}_{args.search_seed}'\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "# build model\n",
    "snn_params={'T':            T, \n",
    "                'init_tau':     init_tau, \n",
    "                'v_threshold':  v_threshold,\n",
    "                'neuron':       neuron,\n",
    "                'is_DVS_data':  'DVS' in args.dataset_name,\n",
    "    }\n",
    "ckpt = torch.load(args.supernet, map_location=torch.device('cpu'), weights_only=True)\n",
    "\n",
    "if args.avg_num_spikes is None:\n",
    "    args.avg_num_spikes = ckpt['avg_num_spikes']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "from search_arch.LLMSearch import LLMSearch, DynamicPrompt\n",
    "from search_arch.LLMconfig.database import represent_dict\n",
    "constraints_dict = {'train_avg_spiking_numbers':args.avg_num_spikes, 'lambda':args.fitness_lambda}\n",
    "snn_params_str = represent_dict(snn_params)\n",
    "args.num_class = 10\n",
    "from space import CANDIDATE_BLOCKS\n",
    "candidate = LLMSearch.get_candidate_description(CANDIDATE_BLOCKS)\n",
    "search_space = LLMSearch.get_search_space(args.search_space)\n",
    "space_dict = {\n",
    "            \"snn_params\" : represent_dict(snn_params),\n",
    "            \"num_class\" : str(args.num_class),\n",
    "            \"search_space\" : search_space,\n",
    "            \"dataset_name\" : args.dataset_name,\n",
    "            \"operators\" : candidate\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Name: langchain\n",
      "Version: 0.3.14\n",
      "Summary: Building applications with LLMs through composability\n",
      "Home-page: https://github.com/langchain-ai/langchain\n",
      "Author: \n",
      "Author-email: \n",
      "License: MIT\n",
      "Location: /home/nudt/anaconda3/envs/python312/lib/python3.12/site-packages\n",
      "Requires: aiohttp, langchain-core, langchain-text-splitters, langsmith, numpy, pydantic, PyYAML, requests, SQLAlchemy, tenacity\n",
      "Required-by: langchain-community\n"
     ]
    }
   ],
   "source": [
    "# !pip install --upgrade typing_extensions\n",
    "!pip show langchain\n",
    "# !pip install typing_extensions==4.8.0\n",
    "# from typing_extensions import TypeIs, deprecated, get_args, get_origin\n",
    "# !python -c \"import typing_extensions; print(typing_extensions.__file__)\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'T': 8, 'init_tau': 2.0, 'v_threshold': 1.0, 'neuron': PLIF, 'is_DVS_data': False\n"
     ]
    }
   ],
   "source": [
    "print(snn_params_str)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !openssl x509 -in e:/anaconda3/envs/llm4nas/Lib/site-packages/certifi/cacert.pem -purpose -noout\n",
    "# !openssl x509 -in E:/anaconda3/envs/llm4nas/Library/ssl/cacert.pem -purpose -noout"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "task_id = 1\n",
    "dynamic = DynamicPrompt(task_id,constraints_dict,space_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from search_arch.LLMconfig.config import load_env, responsellm, getllm\n",
    "from search_arch.LLMconfig.constants import LLM_MODELS\n",
    "load_env()\n",
    "client = getllm(\"suggest\")\n",
    "# prompt = \"please write a python code to find the maximum number in a list\"\n",
    "# responsellm(client=client,prompt=prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'Constraints': 'Model must exceed 80% accuracy and achieve a fitness score '\n",
      "                'over 85% on the test set, calculated by fitness = accuracy * '\n",
      "                '(infer_spiking_numbers / 316839.057475)^-0.08, with no '\n",
      "                'specific hardware constraints on inferencing.',\n",
      " 'EvalLogs': '',\n",
      " 'Knowledges': 'One shot NAS is an efficient neural network architecture '\n",
      "               'search method that utilizes hypernetworks and weight sharing '\n",
      "               'to reduce search costs. The hypernetwork contains all '\n",
      "               'candidate architectures, with weights shared among '\n",
      "               'subnetworks, avoiding the need to train each architecture '\n",
      "               'separately. Weight sharing allows sub networks to inherit the '\n",
      "               'weights of the hypernetwork, reducing the need for training '\n",
      "               'from scratch.\\n'\n",
      "               'One shot NAS typically uses evolutionary algorithms for '\n",
      "               'architecture search, generating new candidate architectures '\n",
      "               'through mutation and crossover operations, and selecting the '\n",
      "               'optimal architecture based on performance metrics. This method '\n",
      "               'is efficient, scalable, and flexible, and has been widely used '\n",
      "               'in tasks such as image classification and object detection, '\n",
      "               'and has achieved success in SNN fields such as AutoSNN.\\n'\n",
      "               'Direct encoding, using encoding layer(s) to uniformly encode '\n",
      "               'the input image. This schema is simpler, flexible, and more '\n",
      "               'accurate when the simulation timestep is small, and more '\n",
      "               'suitable for deep spiking neural networks\\n'\n",
      "               'The Spiking Convolution Block (SCB) derived from VGGNet '\n",
      "               'comprises two convolutional layers, each followed by BatchNorm '\n",
      "               'and a spiking neuron. The first layer transforms from '\n",
      "               '**C_input** to **C_output** channels, while the second '\n",
      "               'maintains **C_output** channels for both input and output, '\n",
      "               'ensuring consistent feature mapping.\\n'\n",
      "               'The Spiking Residual Block (SRB) derived from ResNet features '\n",
      "               'two convolutional layers, each followed by BatchNorm and '\n",
      "               'spiking neurons. The first layer maps **C_input** to '\n",
      "               '**C_output** channels, while the second retains **C_output** '\n",
      "               'channels. A skip connection adds the input feature map to the '\n",
      "               'output after the second layer, enhancing signal propagation.',\n",
      " 'Requirements': 'Create an SNN for classifying 32x32-pixel images into 10 '\n",
      "                 'categories. The model should feature batch normalization for '\n",
      "                 'enhanced generalization. The goal is to score ≥85% fitness '\n",
      "                 '(accuracy + spiking numbers) on the test set, proving '\n",
      "                 'effectiveness with small, low-res imagery. Metrics include '\n",
      "                 'accuracy and spiking numbers. Preprocessing involves '\n",
      "                 'normalization, with data augmented by random cropping and '\n",
      "                 'horizontal flipping.\\n'\n",
      "                 'Examples\\n'\n",
      "                 'TBD layers configured as: [1,2,3,4,0].\\n'\n",
      "                 ' namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th '\n",
      "                 'skip_connection.\\n'\n",
      "                 ' Refer to examples above and the SearchSpace description '\n",
      "                 'below, specify the five TBD layers in macro-level '\n",
      "                 'architecture, choosing from the five TBDs: '\n",
      "                 'skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), '\n",
      "                 'SRB_k5(4). And Output python list of five non-negative '\n",
      "                 'integers, and please keep format same as examples above.',\n",
      " 'SearchSpace': 'Each architecture can be implementation via the followed '\n",
      "                'Pytorch code:\\n'\n",
      "                '\\n'\n",
      "                'This architecture can be represented by a candidate block_id '\n",
      "                'list, i.e., block_ids = [op0, op1, op2, op3, op4, op5], where '\n",
      "                'each TBD layer is with a specific operation such as pooling '\n",
      "                'and convolution.\\n'\n",
      "                '\\n'\n",
      "                '```\\n'\n",
      "                '#There are 5 candidate operation for each TBD layer: \\n'\n",
      "                \"operators = {0: 'Identity(C_in, C_out, snn_params) if stride \"\n",
      "                \"== 1 else FactorizedReduce(C_in, C_out, snn_params)', 1: \"\n",
      "                \"'SpikingConvBlock(C_in, C_out, 3, stride, snn_params)', 2: \"\n",
      "                \"'SpikingConvBlock(C_in, C_out, 5, stride, snn_params)', 3: \"\n",
      "                \"'SpikingResidualBlock(C_in, C_out, 3, stride, snn_params)', \"\n",
      "                \"4: 'SpikingResidualBlock(C_in, C_out, 5, stride, \"\n",
      "                \"snn_params)'}\\n\"\n",
      "                '\\n'\n",
      "                '# SpikingNetwork class is used to represent the '\n",
      "                'architecture: \\n'\n",
      "                'class SpikingNetwork(nn.Module):\\n'\n",
      "                '    def __init__(self, search_space, block_ids ,num_class, '\n",
      "                'snn_params, dataset_name):\\n'\n",
      "                '        super(SpikingNetwork, self).__init__()\\n'\n",
      "                '        # SNN parameters\\n'\n",
      "                '        self.snn_params = snn_params\\n'\n",
      "                '        if snn_params is not None:\\n'\n",
      "                \"            self.T = snn_params['T']\\n\"\n",
      "                \"            init_tau = snn_params['init_tau']\\n\"\n",
      "                \"            v_threshold = snn_params['v_threshold']\\n\"\n",
      "                \"            neuron = snn_params['neuron']\\n\"\n",
      "                \"            self.is_DVS_data = snn_params['is_DVS_data']\\n\"\n",
      "                '        if dataset_name is not None:\\n'\n",
      "                '            self.is_tiny_imageNet_200 = dataset_name '\n",
      "                \"=='Tiny-ImageNet-200'\\n\"\n",
      "                '        else:\\n'\n",
      "                '            self.is_tiny_imageNet_200 = False\\n'\n",
      "                '        # build network\\n'\n",
      "                \"        C_stem = search_space['stem_channel']\\n\"\n",
      "                \"        channels = search_space['block_channels']\\n\"\n",
      "                \"        strides = search_space['strides']\\n\"\n",
      "                \"        use_GAP = search_space['use_GAP']\\n\"\n",
      "                '\\n'\n",
      "                '        img_size = 32\\n'\n",
      "                '        C_in = 3\\n'\n",
      "                '        if self.is_DVS_data:\\n'\n",
      "                '            img_size = 128\\n'\n",
      "                '            C_in =  2\\n'\n",
      "                '        elif self.is_tiny_imageNet_200:\\n'\n",
      "                '            img_size = 64\\n'\n",
      "                '\\n'\n",
      "                '        # stem conv layer\\n'\n",
      "                '        self.conv_stem = nn.Sequential(\\n'\n",
      "                '            nn.Conv2d(C_in, C_stem, kernel_size=3, stride=1, '\n",
      "                'padding=1, bias=False),\\n'\n",
      "                '            nn.BatchNorm2d(C_stem)\\n'\n",
      "                '        )\\n'\n",
      "                '        self.spike_neuron_stem = NEURON[neuron](init_tau, '\n",
      "                'v_threshold)\\n'\n",
      "                '\\n'\n",
      "                '        if self.is_DVS_data:\\n'\n",
      "                '            self.DVS_max_pool_1 = max_pool_k2(C_stem, C_stem, '\n",
      "                'stride=2, snn_params)\\n'\n",
      "                '            img_size = img_size // 2\\n'\n",
      "                '\\n'\n",
      "                '            self.DVS_conv_1 = nn.Sequential(\\n'\n",
      "                '                nn.Conv2d(C_stem, C_stem, kernel_size=3, '\n",
      "                'stride=1, padding=1, bias=False),\\n'\n",
      "                '                nn.BatchNorm2d(C_stem),\\n'\n",
      "                '                NEURON[neuron](init_tau, v_threshold),\\n'\n",
      "                '            )\\n'\n",
      "                '            self.DVS_max_pool_2 = max_pool_k2(C_stem, C_stem, '\n",
      "                'stride=2, snn_params)\\n'\n",
      "                '            img_size = img_size // 2\\n'\n",
      "                '        elif self.is_tiny_imageNet_200:\\n'\n",
      "                '            self.ImageNet_max_pool = max_pool_k2(C_stem, '\n",
      "                'C_stem, stride=2, snn_param)\\n'\n",
      "                '            img_size = img_size // 2\\n'\n",
      "                '            self.ImageNet_conv = nn.Sequential(\\n'\n",
      "                '                nn.Conv2d(C_stem, C_stem, kernel_size=3, '\n",
      "                'stride=1, padding=1, bias=False),\\n'\n",
      "                '                nn.BatchNorm2d(C_stem),\\n'\n",
      "                '                NEURON[neuron](init_tau, v_threshold),\\n'\n",
      "                '            )\\n'\n",
      "                '            \\n'\n",
      "                '        # TBD blocks\\n'\n",
      "                '        self.num_TBDs = 0\\n'\n",
      "                '        self.layers = nn.ModuleList()\\n'\n",
      "                '        C_in = C_stem\\n'\n",
      "                '        for C_out, stride in zip(channels, strides):\\n'\n",
      "                '            if stride == 1:\\n'\n",
      "                '                block_id = block_ids[self.num_TBDs]\\n'\n",
      "                '                '\n",
      "                'self.layers.append(SpikingBlockSet(block_id,C_in, C_out, '\n",
      "                'stride, self.snn_params))\\n'\n",
      "                '                self.num_TBDs += 1\\n'\n",
      "                '            elif stride == 2:\\n'\n",
      "                \"                if C_out == 'm':\\n\"\n",
      "                \"                    block_name = 'max_pool_k2'\\n\"\n",
      "                '                    self.layers.append(max_pool_k2(C_in, '\n",
      "                'C_out, stride, self.snn_params))\\n'\n",
      "                '                    C_out = C_in\\n'\n",
      "                '                else:\\n'\n",
      "                '                    block_id = block_ids[self.num_TBDs]\\n'\n",
      "                '                    self.layers.append(SpikingBlockSet(C_in, '\n",
      "                'C_out, stride, self.snn_params))\\n'\n",
      "                '                    self.num_TBDs += 1\\n'\n",
      "                '                img_size = img_size // 2\\n'\n",
      "                '            C_in = C_out\\n'\n",
      "                '        # FC layer\\n'\n",
      "                '        if use_GAP:\\n'\n",
      "                '            self.avgpool = nn.Sequential(\\n'\n",
      "                '                nn.AdaptiveAvgPool2d((1, 1)),\\n'\n",
      "                '                NEURON[neuron](init_tau, v_threshold),\\n'\n",
      "                '            )\\n'\n",
      "                '            img_size = 1\\n'\n",
      "                '        else:\\n'\n",
      "                '            self.avgpool = None\\n'\n",
      "                '        C_in = C_in * img_size * img_size\\n'\n",
      "                '\\n'\n",
      "                '        self.fc = nn.Sequential(\\n'\n",
      "                '            nn.Flatten(),\\n'\n",
      "                '            nn.Linear(C_in, num_class * 10, bias=False),\\n'\n",
      "                '            NEURON[neuron](init_tau, v_threshold),\\n'\n",
      "                '        )\\n'\n",
      "                '        self.boost = nn.AvgPool1d(10, 10)\\n'\n",
      "                '\\n'\n",
      "                '    def forward(self, x):\\n'\n",
      "                '        if self.is_DVS_data:\\n'\n",
      "                '            out_spikes_counter = None\\n'\n",
      "                '            x = x.permute(1, 0, 2, 3, 4) # [T, N, 2, *, *]\\n'\n",
      "                '            for t in range(x.shape[0]):\\n'\n",
      "                '                out = '\n",
      "                'self.spike_neuron_stem(self.conv_stem(x[t]))\\n'\n",
      "                '                out, _ = self.DVS_max_pool_1(out)\\n'\n",
      "                '                out = self.DVS_conv_1(out)\\n'\n",
      "                '                out, _ = self.DVS_max_pool_2(out)\\n'\n",
      "                '                for layer in self.layers:\\n'\n",
      "                '                    out, _ = layer(out)\\n'\n",
      "                '                if self.avgpool is not None:\\n'\n",
      "                '                    out = self.avgpool(out)\\n'\n",
      "                '                out = self.fc(out)\\n'\n",
      "                '                if out_spikes_counter is None:\\n'\n",
      "                '                    out_spikes_counter = '\n",
      "                'self.boost(out.unsqueeze(1)).squeeze(1)\\n'\n",
      "                '                else:\\n'\n",
      "                '                    out_spikes_counter += '\n",
      "                'self.boost(out.unsqueeze(1)).squeeze(1)\\n'\n",
      "                '            return out_spikes_counter\\n'\n",
      "                '        else:\\n'\n",
      "                '            out_spikes_counter = None\\n'\n",
      "                '            x = self.conv_stem(x)\\n'\n",
      "                '            for t in range(self.T):\\n'\n",
      "                '                out = self.spike_neuron_stem(x)\\n'\n",
      "                '                if self.is_tiny_imageNet_200:\\n'\n",
      "                '                    out, _ = self.ImageNet_max_pool(out)\\n'\n",
      "                '                    out = self.ImageNet_conv(out)\\n'\n",
      "                '                for layer in self.layers:\\n'\n",
      "                '                    out, _ = layer(out)\\n'\n",
      "                '                if self.avgpool is not None:\\n'\n",
      "                '                    out = self.avgpool(out)\\n'\n",
      "                '                out = self.fc(out)\\n'\n",
      "                '                if out_spikes_counter is None:\\n'\n",
      "                '                    out_spikes_counter = '\n",
      "                'self.boost(out.unsqueeze(1)).squeeze(1)\\n'\n",
      "                '                else:\\n'\n",
      "                '                    out_spikes_counter += '\n",
      "                'self.boost(out.unsqueeze(1)).squeeze(1)\\n'\n",
      "                '            return out_spikes_counter\\n'\n",
      "                '\\n'\n",
      "                '# we can obtain a specific model architecture via the '\n",
      "                'followed function\\n'\n",
      "                'Model_architecture = SpikingNetwork(search_space, block_ids, '\n",
      "                'num_class, snn_params, dataset_name),\\n'\n",
      "                \"where search_space='stem_channel': 64, 'block_channels': [64, \"\n",
      "                \"'m', 128, 128, 'm', 256, 256, 'm'], 'strides': [1, 2, 1, 1, \"\n",
      "                \"2, 1, 1, 2], 'use_GAP': False \\n\"\n",
      "                'num_class=10, dataset_name=CIFAR10, \\n'\n",
      "                \"snn_params='T': 8, 'init_tau': 2.0, 'v_threshold': 1.0, \"\n",
      "                \"'neuron': PLIF, 'is_DVS_data': False\\n\"\n",
      "                '\\n'\n",
      "                '## other information\\n'\n",
      "                'NEURON = {\\n'\n",
      "                \"  'IF': lambda tau, v_threshold: \"\n",
      "                'IFNode(v_threshold=v_threshold, '\n",
      "                'surrogate_function=surrogate.ATan(), detach_reset=True),\\n'\n",
      "                \"  'LIF': lambda tau, v_threshold: LIFNode(tau=tau, \"\n",
      "                'v_threshold=v_threshold, surrogate_function=surrogate.ATan(), '\n",
      "                'detach_reset=True),\\n'\n",
      "                \"  'PLIF': lambda tau, v_threshold: PLIFNode(init_tau=tau, \"\n",
      "                'v_threshold=v_threshold, surrogate_function=surrogate.ATan(), '\n",
      "                'detach_reset=True),\\n'\n",
      "                '}\\n'\n",
      "                '# SpikingBlockSet class is the basical building block of the '\n",
      "                'network\\n'\n",
      "                'class SpikingBlockSet(nn.Module):\\n'\n",
      "                '    def __init__(self, block_id, C_in, C_out, stride, '\n",
      "                'snn_params):\\n'\n",
      "                '        super(SpikingBlockSet, self).__init__()\\n'\n",
      "                '        self.block = operators[block_id](C_in, C_out, stride, '\n",
      "                'snn_params)\\n'\n",
      "                '    def forward(self, x):\\n'\n",
      "                '        return self.block(x)\\n'\n",
      "                '```\\n',\n",
      " 'output_example': 'Examples\\n'\n",
      "                   'TBD layers configured as: [1,2,3,4,0].\\n'\n",
      "                   ' namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, '\n",
      "                   '5th skip_connection.\\n'\n",
      "                   ' Refer to examples above and the SearchSpace description '\n",
      "                   'below, specify the five TBD layers in macro-level '\n",
      "                   'architecture, choosing from the five TBDs: '\n",
      "                   'skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), '\n",
      "                   'SRB_k5(4). And Output python list of five non-negative '\n",
      "                   'integers, and please keep format same as examples above.'}\n"
     ]
    }
   ],
   "source": [
    "from pprint import pprint\n",
    "from search_arch.LLMSearch import LLMSearch, DynamicPrompt\n",
    "pprint(dynamic.config_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# Role\n",
      "\n",
      "AutoML Candidate Generator\n",
      "\n",
      "## Profile\n",
      "\n",
      "This role is specialized in generating high-quality candidate sets in the AutoML field, aimed at assisting users in creating candidate sets to optimize the performance and hardware efficiency of SNNs (Spiking Neural Networks).\n",
      "\n",
      "## Knowledges\n",
      "\n",
      "One shot NAS is an efficient neural network architecture search method that utilizes hypernetworks and weight sharing to reduce search costs. The hypernetwork contains all candidate architectures, with weights shared among subnetworks, avoiding the need to train each architecture separately. Weight sharing allows sub networks to inherit the weights of the hypernetwork, reducing the need for training from scratch.\n",
      "One shot NAS typically uses evolutionary algorithms for architecture search, generating new candidate architectures through mutation and crossover operations, and selecting the optimal architecture based on performance metrics. This method is efficient, scalable, and flexible, and has been widely used in tasks such as image classification and object detection, and has achieved success in SNN fields such as AutoSNN.\n",
      "Direct encoding, using encoding layer(s) to uniformly encode the input image. This schema is simpler, flexible, and more accurate when the simulation timestep is small, and more suitable for deep spiking neural networks\n",
      "The Spiking Convolution Block (SCB) derived from VGGNet comprises two convolutional layers, each followed by BatchNorm and a spiking neuron. The first layer transforms from **C_input** to **C_output** channels, while the second maintains **C_output** channels for both input and output, ensuring consistent feature mapping.\n",
      "The Spiking Residual Block (SRB) derived from ResNet features two convolutional layers, each followed by BatchNorm and spiking neurons. The first layer maps **C_input** to **C_output** channels, while the second retains **C_output** channels. A skip connection adds the input feature map to the output after the second layer, enhancing signal propagation.\n",
      "\n",
      "## Constraints\n",
      "\n",
      "Model must exceed 80% accuracy and achieve a fitness score over 85% on the test set, calculated by fitness = accuracy * (infer_spiking_numbers / 316839.057475)^-0.08, with no specific hardware constraints on inferencing.\n",
      "\n",
      "## Eval logs\n",
      "\n",
      "\n",
      "\n",
      "## Requirements\n",
      "\n",
      "Create an SNN for classifying 32x32-pixel images into 10 categories. The model should feature batch normalization for enhanced generalization. The goal is to score ≥85% fitness (accuracy + spiking numbers) on the test set, proving effectiveness with small, low-res imagery. Metrics include accuracy and spiking numbers. Preprocessing involves normalization, with data augmented by random cropping and horizontal flipping.\n",
      "Examples\n",
      "TBD layers configured as: [1,2,3,4,0].\n",
      " namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th skip_connection.\n",
      " Refer to examples above and the SearchSpace description below, specify the five TBD layers in macro-level architecture, choosing from the five TBDs: skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), SRB_k5(4). And Output python list of five non-negative integers, and please keep format same as examples above.\n",
      "\n",
      "## Search Space\n",
      "\n",
      "Each architecture can be implementation via the followed Pytorch code:\n",
      "\n",
      "This architecture can be represented by a candidate block_id list, i.e., block_ids = [op0, op1, op2, op3, op4, op5], where each TBD layer is with a specific operation such as pooling and convolution.\n",
      "\n",
      "```\n",
      "#There are 5 candidate operation for each TBD layer: \n",
      "operators = {0: 'Identity(C_in, C_out, snn_params) if stride == 1 else FactorizedReduce(C_in, C_out, snn_params)', 1: 'SpikingConvBlock(C_in, C_out, 3, stride, snn_params)', 2: 'SpikingConvBlock(C_in, C_out, 5, stride, snn_params)', 3: 'SpikingResidualBlock(C_in, C_out, 3, stride, snn_params)', 4: 'SpikingResidualBlock(C_in, C_out, 5, stride, snn_params)'}\n",
      "\n",
      "# SpikingNetwork class is used to represent the architecture: \n",
      "class SpikingNetwork(nn.Module):\n",
      "    def __init__(self, search_space, block_ids ,num_class, snn_params, dataset_name):\n",
      "        super(SpikingNetwork, self).__init__()\n",
      "        # SNN parameters\n",
      "        self.snn_params = snn_params\n",
      "        if snn_params is not None:\n",
      "            self.T = snn_params['T']\n",
      "            init_tau = snn_params['init_tau']\n",
      "            v_threshold = snn_params['v_threshold']\n",
      "            neuron = snn_params['neuron']\n",
      "            self.is_DVS_data = snn_params['is_DVS_data']\n",
      "        if dataset_name is not None:\n",
      "            self.is_tiny_imageNet_200 = dataset_name =='Tiny-ImageNet-200'\n",
      "        else:\n",
      "            self.is_tiny_imageNet_200 = False\n",
      "        # build network\n",
      "        C_stem = search_space['stem_channel']\n",
      "        channels = search_space['block_channels']\n",
      "        strides = search_space['strides']\n",
      "        use_GAP = search_space['use_GAP']\n",
      "\n",
      "        img_size = 32\n",
      "        C_in = 3\n",
      "        if self.is_DVS_data:\n",
      "            img_size = 128\n",
      "            C_in =  2\n",
      "        elif self.is_tiny_imageNet_200:\n",
      "            img_size = 64\n",
      "\n",
      "        # stem conv layer\n",
      "        self.conv_stem = nn.Sequential(\n",
      "            nn.Conv2d(C_in, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\n",
      "            nn.BatchNorm2d(C_stem)\n",
      "        )\n",
      "        self.spike_neuron_stem = NEURON[neuron](init_tau, v_threshold)\n",
      "\n",
      "        if self.is_DVS_data:\n",
      "            self.DVS_max_pool_1 = max_pool_k2(C_stem, C_stem, stride=2, snn_params)\n",
      "            img_size = img_size // 2\n",
      "\n",
      "            self.DVS_conv_1 = nn.Sequential(\n",
      "                nn.Conv2d(C_stem, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\n",
      "                nn.BatchNorm2d(C_stem),\n",
      "                NEURON[neuron](init_tau, v_threshold),\n",
      "            )\n",
      "            self.DVS_max_pool_2 = max_pool_k2(C_stem, C_stem, stride=2, snn_params)\n",
      "            img_size = img_size // 2\n",
      "        elif self.is_tiny_imageNet_200:\n",
      "            self.ImageNet_max_pool = max_pool_k2(C_stem, C_stem, stride=2, snn_param)\n",
      "            img_size = img_size // 2\n",
      "            self.ImageNet_conv = nn.Sequential(\n",
      "                nn.Conv2d(C_stem, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\n",
      "                nn.BatchNorm2d(C_stem),\n",
      "                NEURON[neuron](init_tau, v_threshold),\n",
      "            )\n",
      "            \n",
      "        # TBD blocks\n",
      "        self.num_TBDs = 0\n",
      "        self.layers = nn.ModuleList()\n",
      "        C_in = C_stem\n",
      "        for C_out, stride in zip(channels, strides):\n",
      "            if stride == 1:\n",
      "                block_id = block_ids[self.num_TBDs]\n",
      "                self.layers.append(SpikingBlockSet(block_id,C_in, C_out, stride, self.snn_params))\n",
      "                self.num_TBDs += 1\n",
      "            elif stride == 2:\n",
      "                if C_out == 'm':\n",
      "                    block_name = 'max_pool_k2'\n",
      "                    self.layers.append(max_pool_k2(C_in, C_out, stride, self.snn_params))\n",
      "                    C_out = C_in\n",
      "                else:\n",
      "                    block_id = block_ids[self.num_TBDs]\n",
      "                    self.layers.append(SpikingBlockSet(C_in, C_out, stride, self.snn_params))\n",
      "                    self.num_TBDs += 1\n",
      "                img_size = img_size // 2\n",
      "            C_in = C_out\n",
      "        # FC layer\n",
      "        if use_GAP:\n",
      "            self.avgpool = nn.Sequential(\n",
      "                nn.AdaptiveAvgPool2d((1, 1)),\n",
      "                NEURON[neuron](init_tau, v_threshold),\n",
      "            )\n",
      "            img_size = 1\n",
      "        else:\n",
      "            self.avgpool = None\n",
      "        C_in = C_in * img_size * img_size\n",
      "\n",
      "        self.fc = nn.Sequential(\n",
      "            nn.Flatten(),\n",
      "            nn.Linear(C_in, num_class * 10, bias=False),\n",
      "            NEURON[neuron](init_tau, v_threshold),\n",
      "        )\n",
      "        self.boost = nn.AvgPool1d(10, 10)\n",
      "\n",
      "    def forward(self, x):\n",
      "        if self.is_DVS_data:\n",
      "            out_spikes_counter = None\n",
      "            x = x.permute(1, 0, 2, 3, 4) # [T, N, 2, *, *]\n",
      "            for t in range(x.shape[0]):\n",
      "                out = self.spike_neuron_stem(self.conv_stem(x[t]))\n",
      "                out, _ = self.DVS_max_pool_1(out)\n",
      "                out = self.DVS_conv_1(out)\n",
      "                out, _ = self.DVS_max_pool_2(out)\n",
      "                for layer in self.layers:\n",
      "                    out, _ = layer(out)\n",
      "                if self.avgpool is not None:\n",
      "                    out = self.avgpool(out)\n",
      "                out = self.fc(out)\n",
      "                if out_spikes_counter is None:\n",
      "                    out_spikes_counter = self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "                else:\n",
      "                    out_spikes_counter += self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "            return out_spikes_counter\n",
      "        else:\n",
      "            out_spikes_counter = None\n",
      "            x = self.conv_stem(x)\n",
      "            for t in range(self.T):\n",
      "                out = self.spike_neuron_stem(x)\n",
      "                if self.is_tiny_imageNet_200:\n",
      "                    out, _ = self.ImageNet_max_pool(out)\n",
      "                    out = self.ImageNet_conv(out)\n",
      "                for layer in self.layers:\n",
      "                    out, _ = layer(out)\n",
      "                if self.avgpool is not None:\n",
      "                    out = self.avgpool(out)\n",
      "                out = self.fc(out)\n",
      "                if out_spikes_counter is None:\n",
      "                    out_spikes_counter = self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "                else:\n",
      "                    out_spikes_counter += self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "            return out_spikes_counter\n",
      "\n",
      "# we can obtain a specific model architecture via the followed function\n",
      "Model_architecture = SpikingNetwork(search_space, block_ids, num_class, snn_params, dataset_name),\n",
      "where search_space='stem_channel': 64, 'block_channels': [64, 'm', 128, 128, 'm', 256, 256, 'm'], 'strides': [1, 2, 1, 1, 2, 1, 1, 2], 'use_GAP': False \n",
      "num_class=10, dataset_name=CIFAR10, \n",
      "snn_params='T': 8, 'init_tau': 2.0, 'v_threshold': 1.0, 'neuron': PLIF, 'is_DVS_data': False\n",
      "\n",
      "## other information\n",
      "NEURON = {\n",
      "  'IF': lambda tau, v_threshold: IFNode(v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\n",
      "  'LIF': lambda tau, v_threshold: LIFNode(tau=tau, v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\n",
      "  'PLIF': lambda tau, v_threshold: PLIFNode(init_tau=tau, v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\n",
      "}\n",
      "# SpikingBlockSet class is the basical building block of the network\n",
      "class SpikingBlockSet(nn.Module):\n",
      "    def __init__(self, block_id, C_in, C_out, stride, snn_params):\n",
      "        super(SpikingBlockSet, self).__init__()\n",
      "        self.block = operators[block_id](C_in, C_out, stride, snn_params)\n",
      "    def forward(self, x):\n",
      "        return self.block(x)\n",
      "```\n",
      "\n",
      "\n",
      "## Skills\n",
      "\n",
      "1. Analyze user requests and datasets in the Requirements above.\n",
      "2. Preliminaryly evaluate the generated candidate solutions to ensure they meet all constraints above. Regenerate candidate solutions if they do not meet the constraints until all constraints are satisfied.\n",
      "\n",
      "## Rules\n",
      "\n",
      "1. The generated candidate solutions must be complete and correct in format.\n",
      "2. Ensure the candidate solutions meet all constraints above. Regenerate if not satisfied until all constraints are met.\n",
      "3. The generated candidate solutions must be in the following format: Examples\n",
      "TBD layers configured as: [1,2,3,4,0].\n",
      " namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th skip_connection.\n",
      " Refer to examples above and the SearchSpace description below, specify the five TBD layers in macro-level architecture, choosing from the five TBDs: skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), SRB_k5(4). And Output python list of five non-negative integers, and please keep format same as examples above..\n",
      "\n",
      "## Workflow\n",
      "\n",
      "1. Analyze user requests and datasets in the Requirements above, understand the type, features, target variables, and related application backgrounds of the datasets.\n",
      "2. Understand model performance and hardware efficiency using Eval logs, and identify potential improvement points.\n",
      "3. Based on Knowledges and eval logs above, automatically use appropriate search strategies for neural network search, suitable optimization methods for hyperparameter optimization, and generate candidate solutions.\n",
      "4. Preliminaryly evaluate the generated candidate solutions to ensure they meet all constraints. If not, return to step 4 and regenerate candidate solutions until all constraints are satisfied.\n",
      "5. Return the generated candidate solutions to the user with specific format.\n",
      "\n",
      "## Initialization\n",
      "\n",
      "As an AutoML Candidate Generator, your task is to generate candidate sets to optimize the performance and hardware efficiency of SNNs, and must follow the Rules.\n",
      "\n",
      "Firstly, analyze user requests and datasets in the Requirements above, then evaluate model performance and hardware efficiency using Eval logs. Next, based on Knowledges and constraints information, generate candidate solutions through neural network search and hyperparameter optimization. Finally, preliminaryly evaluate the generated candidate solutions to ensure the quality of the generated content.\n"
     ]
    }
   ],
   "source": [
    "# list_set = set(dynamic.task.space.input_vars)\n",
    "# print(list_set)\n",
    "# config_keys_set = set(space_dict.keys())\n",
    "# print(config_keys_set)\n",
    "# all_in_keys = list_set.issubset(config_keys_set)\n",
    "# print(all_in_keys)\n",
    "prompt = dynamic.get_init_prompt()\n",
    "print(prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "from search_arch.LLMconfig.config import *\n",
    "from search_arch.LLMconfig.database import *\n",
    "# client = ChatOpenAI(model_name=openai_model_name, temperature=0)\n",
    "# client = ChatZhipuAI(model='glm-4-flash',temperature=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Based on the requirements and constraints provided, I will generate a candidate architecture for the SNN model designed to classify 32x32-pixel images into 10 categories. The model must achieve a fitness score of ≥85% on the test set, with accuracy exceeding 80% and optimized spiking numbers.\n",
      "\n",
      "### Analysis of Requirements:\n",
      "1. **Dataset**: 32x32-pixel images, 10 categories (e.g., CIFAR-10).\n",
      "2. **Preprocessing**: Normalization, random cropping, and horizontal flipping.\n",
      "3. **Model Requirements**:\n",
      "   - Batch normalization for generalization.\n",
      "   - Fitness score ≥85% (accuracy * (infer_spiking_numbers / 316839.057475)^-0.08).\n",
      "   - Accuracy >80%.\n",
      "4. **Search Space**:\n",
      "   - Five TBD layers, each chosen from: skip_connection (0), SCB_k3 (1), SCB_k5 (2), SRB_k3 (3), SRB_k5 (4).\n",
      "\n",
      "### Candidate Architecture:\n",
      "To balance accuracy and spiking efficiency, I propose the following architecture:\n",
      "- **1st Layer**: SCB_k3 (1) - A Spiking Convolution Block with a 3x3 kernel for initial feature extraction.\n",
      "- **2nd Layer**: SCB_k5 (2) - A Spiking Convolution Block with a 5x5 kernel to capture broader spatial features.\n",
      "- **3rd Layer**: SRB_k3 (3) - A Spiking Residual Block with a 3x3 kernel to enhance feature propagation.\n",
      "- **4th Layer**: SRB_k5 (4) - A Spiking Residual Block with a 5x5 kernel for deeper feature extraction.\n",
      "- **5th Layer**: Skip Connection (0) - To facilitate gradient flow and reduce spiking activity.\n",
      "\n",
      "### Candidate Solution:\n",
      "```\n",
      "TBD layers configured as: [1, 2, 3, 4, 0].\n",
      "```\n",
      "- **1st Layer**: SCB_k3 (1)\n",
      "- **2nd Layer**: SCB_k5 (2)\n",
      "- **3rd Layer**: SRB_k3 (3)\n",
      "- **4th Layer**: SRB_k5 (4)\n",
      "- **5th Layer**: Skip Connection (0)\n",
      "\n",
      "### Preliminary Evaluation:\n",
      "1. **Accuracy**: The combination of SCB and SRB layers with skip connections is expected to achieve high accuracy (>80%) by effectively extracting and propagating features.\n",
      "2. **Spiking Numbers**: The use of skip connections and residual blocks reduces redundant spiking activity, optimizing the spiking numbers.\n",
      "3. **Fitness Score**: The architecture is designed to balance accuracy and spiking efficiency, ensuring a fitness score ≥85%.\n",
      "\n",
      "This candidate solution meets all constraints and is ready for further evaluation and training.\n"
     ]
    }
   ],
   "source": [
    "config_str = responsellm(client=client, prompt=prompt)\n",
    "print(config_str)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'block_ids': [1, 2, 3, 4, 0]}\n"
     ]
    }
   ],
   "source": [
    "def parse_config(config_str:str)->Dict:\n",
    "        # TODO : parse the config_str to a ython -c \"dict\n",
    "        # 完善ResponseSchema\n",
    "        result_response_schema = [\n",
    "                ResponseSchema(\n",
    "                name=\"block_ids\", \n",
    "                description=\"The configuration of the five TBDs layers and is a separated Python List[int] with lenght=5, block_ids=[block_id1, block_id2, ...].\\\n",
    "                        If this information is not found, output [].\",\n",
    "                type=\"List[int]\"\n",
    "            )\n",
    "        ]\n",
    "        config_dict = config2json(client, config_str, result_response_schema)\n",
    "        return config_dict\n",
    "config_dict = parse_config(config_str)\n",
    "print(config_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1, 2, 3, 4, 0]\n"
     ]
    }
   ],
   "source": [
    "trace = []\n",
    "block_ids = config_dict.get('block_ids',None)\n",
    "if isinstance(block_ids, str):\n",
    "    block_ids = eval(block_ids)\n",
    "assert not isempty(block_ids), \"block_ids不能为空\"\n",
    "assert (len(block_ids) == 5), f\"block_ids:{block_ids}的长度必须为5\"\n",
    "trace.append(block_ids)\n",
    "print(block_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "def decode_arch(block_ids:List[int])->List[int]:\n",
    "        \"\"\" decode only containing TBDs layer to complete arch\n",
    "        \"\"\"\n",
    "        complete_arch = deepcopy(block_ids)\n",
    "        complete_arch.insert(1,-1)\n",
    "        complete_arch.insert(-2,-1)\n",
    "        complete_arch.append(-1)\n",
    "        return complete_arch\n",
    "import utils\n",
    "from torch.nn import functional as F\n",
    "from spikingjelly.clock_driven import functional\n",
    "def infer(loader, net, block_ids=None):\n",
    "        assert(block_ids is not None)\n",
    "        top1 = utils.AverageMeter()\n",
    "        num_spikes = utils.AverageMeter()\n",
    "        net.eval()\n",
    "\n",
    "        total_correct = 0\n",
    "        total_num = 0\n",
    "        num_steps = len(loader)\n",
    "        with torch.no_grad():\n",
    "            for step, (input, target) in enumerate(loader):\n",
    "                input = input.cuda()\n",
    "                target = target.cuda()\n",
    "\n",
    "                out_spikes_counter, num_of_spikes, _ = net(input, block_ids)\n",
    "                functional.reset_net(net)\n",
    "                acc = (out_spikes_counter.argmax(dim=1) == target).float().sum()\n",
    "                total_correct += acc\n",
    "\n",
    "                n = input.size(0)\n",
    "                total_num += n\n",
    "                top1.update(acc.item(), n)\n",
    "                num_spikes.update(num_of_spikes, n)\n",
    "        return total_correct / total_num, num_spikes.avg\n",
    "def _round(x)->float:\n",
    "        if isinstance(x, torch.Tensor):\n",
    "            return round(x.item(), DIGIT)\n",
    "        elif isinstance(x,  int):\n",
    "            return x\n",
    "        else:\n",
    "            return round(x, DIGIT)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 50000/50000 [00:12<00:00, 4076.84it/s]\n"
     ]
    }
   ],
   "source": [
    "args.dataset_dir = './data'\n",
    "import data\n",
    "train_loader, valid_loader, args.num_class = data.get_train_val_loaders(args, search=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "from train_supernet.supernet import SpikingNetwork\n",
    "net = SpikingNetwork(args.search_space, args.num_class, snn_params, args)\n",
    "net.load_state_dict(ckpt['net'])\n",
    "net = net.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'accuracy': 0.8766, 'spikes_numbers': 356961.4906, 'fitness': 0.8683}\n"
     ]
    }
   ],
   "source": [
    "complete_block_ids = decode_arch(block_ids)\n",
    "acc, spikes = infer(valid_loader, net, complete_block_ids)\n",
    "fitness = acc * pow(spikes / args.avg_num_spikes, args.fitness_lambda)\n",
    "metrics_dict = {'accuracy': _round(acc), 'spikes_numbers': _round(spikes), 'fitness': _round(fitness)}\n",
    "print(metrics_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# Role\n",
      "\n",
      "AutoML Candidate Generator\n",
      "\n",
      "## Profile\n",
      "\n",
      "This role is specialized in generating high-quality candidate sets in the AutoML field, aimed at assisting users in creating candidate sets to optimize the performance and hardware efficiency of SNNs (Spiking Neural Networks).\n",
      "\n",
      "## Knowledges\n",
      "\n",
      "One shot NAS is an efficient neural network architecture search method that utilizes hypernetworks and weight sharing to reduce search costs. The hypernetwork contains all candidate architectures, with weights shared among subnetworks, avoiding the need to train each architecture separately. Weight sharing allows sub networks to inherit the weights of the hypernetwork, reducing the need for training from scratch.\n",
      "One shot NAS typically uses evolutionary algorithms for architecture search, generating new candidate architectures through mutation and crossover operations, and selecting the optimal architecture based on performance metrics. This method is efficient, scalable, and flexible, and has been widely used in tasks such as image classification and object detection, and has achieved success in SNN fields such as AutoSNN.\n",
      "Direct encoding, using encoding layer(s) to uniformly encode the input image. This schema is simpler, flexible, and more accurate when the simulation timestep is small, and more suitable for deep spiking neural networks\n",
      "The Spiking Convolution Block (SCB) derived from VGGNet comprises two convolutional layers, each followed by BatchNorm and a spiking neuron. The first layer transforms from **C_input** to **C_output** channels, while the second maintains **C_output** channels for both input and output, ensuring consistent feature mapping.\n",
      "The Spiking Residual Block (SRB) derived from ResNet features two convolutional layers, each followed by BatchNorm and spiking neurons. The first layer maps **C_input** to **C_output** channels, while the second retains **C_output** channels. A skip connection adds the input feature map to the output after the second layer, enhancing signal propagation.\n",
      "\n",
      "## Constraints\n",
      "\n",
      "Model must exceed 80% accuracy and achieve a fitness score over 85% on the test set, calculated by fitness = accuracy * (infer_spiking_numbers / 316839.057475)^-0.08, with no specific hardware constraints on inferencing.\n",
      "\n",
      "## Eval logs\n",
      "\n",
      "-epoch: 0,\n",
      "-configurations: 'block_ids': [1, 2, 3, 4, 0],\n",
      "-metrics: 'accuracy': 0.8766, 'spikes_numbers': 356961.4906, 'fitness': 0.8683\n",
      "\n",
      "\n",
      "## Requirements\n",
      "\n",
      "Create an SNN for classifying 32x32-pixel images into 10 categories. The model should feature batch normalization for enhanced generalization. The goal is to score ≥85% fitness (accuracy + spiking numbers) on the test set, proving effectiveness with small, low-res imagery. Metrics include accuracy and spiking numbers. Preprocessing involves normalization, with data augmented by random cropping and horizontal flipping.\n",
      "Examples\n",
      "TBD layers configured as: [1,2,3,4,0].\n",
      " namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th skip_connection.\n",
      " Refer to examples above and the SearchSpace description below, specify the five TBD layers in macro-level architecture, choosing from the five TBDs: skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), SRB_k5(4). And Output python list of five non-negative integers, and please keep format same as examples above.\n",
      "\n",
      "## Search Space\n",
      "\n",
      "Each architecture can be implementation via the followed Pytorch code:\n",
      "\n",
      "This architecture can be represented by a candidate block_id list, i.e., block_ids = [op0, op1, op2, op3, op4, op5], where each TBD layer is with a specific operation such as pooling and convolution.\n",
      "\n",
      "```\n",
      "#There are 5 candidate operation for each TBD layer: \n",
      "operators = {0: 'Identity(C_in, C_out, snn_params) if stride == 1 else FactorizedReduce(C_in, C_out, snn_params)', 1: 'SpikingConvBlock(C_in, C_out, 3, stride, snn_params)', 2: 'SpikingConvBlock(C_in, C_out, 5, stride, snn_params)', 3: 'SpikingResidualBlock(C_in, C_out, 3, stride, snn_params)', 4: 'SpikingResidualBlock(C_in, C_out, 5, stride, snn_params)'}\n",
      "\n",
      "# SpikingNetwork class is used to represent the architecture: \n",
      "class SpikingNetwork(nn.Module):\n",
      "    def __init__(self, search_space, block_ids ,num_class, snn_params, dataset_name):\n",
      "        super(SpikingNetwork, self).__init__()\n",
      "        # SNN parameters\n",
      "        self.snn_params = snn_params\n",
      "        if snn_params is not None:\n",
      "            self.T = snn_params['T']\n",
      "            init_tau = snn_params['init_tau']\n",
      "            v_threshold = snn_params['v_threshold']\n",
      "            neuron = snn_params['neuron']\n",
      "            self.is_DVS_data = snn_params['is_DVS_data']\n",
      "        if dataset_name is not None:\n",
      "            self.is_tiny_imageNet_200 = dataset_name =='Tiny-ImageNet-200'\n",
      "        else:\n",
      "            self.is_tiny_imageNet_200 = False\n",
      "        # build network\n",
      "        C_stem = search_space['stem_channel']\n",
      "        channels = search_space['block_channels']\n",
      "        strides = search_space['strides']\n",
      "        use_GAP = search_space['use_GAP']\n",
      "\n",
      "        img_size = 32\n",
      "        C_in = 3\n",
      "        if self.is_DVS_data:\n",
      "            img_size = 128\n",
      "            C_in =  2\n",
      "        elif self.is_tiny_imageNet_200:\n",
      "            img_size = 64\n",
      "\n",
      "        # stem conv layer\n",
      "        self.conv_stem = nn.Sequential(\n",
      "            nn.Conv2d(C_in, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\n",
      "            nn.BatchNorm2d(C_stem)\n",
      "        )\n",
      "        self.spike_neuron_stem = NEURON[neuron](init_tau, v_threshold)\n",
      "\n",
      "        if self.is_DVS_data:\n",
      "            self.DVS_max_pool_1 = max_pool_k2(C_stem, C_stem, stride=2, snn_params)\n",
      "            img_size = img_size // 2\n",
      "\n",
      "            self.DVS_conv_1 = nn.Sequential(\n",
      "                nn.Conv2d(C_stem, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\n",
      "                nn.BatchNorm2d(C_stem),\n",
      "                NEURON[neuron](init_tau, v_threshold),\n",
      "            )\n",
      "            self.DVS_max_pool_2 = max_pool_k2(C_stem, C_stem, stride=2, snn_params)\n",
      "            img_size = img_size // 2\n",
      "        elif self.is_tiny_imageNet_200:\n",
      "            self.ImageNet_max_pool = max_pool_k2(C_stem, C_stem, stride=2, snn_param)\n",
      "            img_size = img_size // 2\n",
      "            self.ImageNet_conv = nn.Sequential(\n",
      "                nn.Conv2d(C_stem, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\n",
      "                nn.BatchNorm2d(C_stem),\n",
      "                NEURON[neuron](init_tau, v_threshold),\n",
      "            )\n",
      "            \n",
      "        # TBD blocks\n",
      "        self.num_TBDs = 0\n",
      "        self.layers = nn.ModuleList()\n",
      "        C_in = C_stem\n",
      "        for C_out, stride in zip(channels, strides):\n",
      "            if stride == 1:\n",
      "                block_id = block_ids[self.num_TBDs]\n",
      "                self.layers.append(SpikingBlockSet(block_id,C_in, C_out, stride, self.snn_params))\n",
      "                self.num_TBDs += 1\n",
      "            elif stride == 2:\n",
      "                if C_out == 'm':\n",
      "                    block_name = 'max_pool_k2'\n",
      "                    self.layers.append(max_pool_k2(C_in, C_out, stride, self.snn_params))\n",
      "                    C_out = C_in\n",
      "                else:\n",
      "                    block_id = block_ids[self.num_TBDs]\n",
      "                    self.layers.append(SpikingBlockSet(C_in, C_out, stride, self.snn_params))\n",
      "                    self.num_TBDs += 1\n",
      "                img_size = img_size // 2\n",
      "            C_in = C_out\n",
      "        # FC layer\n",
      "        if use_GAP:\n",
      "            self.avgpool = nn.Sequential(\n",
      "                nn.AdaptiveAvgPool2d((1, 1)),\n",
      "                NEURON[neuron](init_tau, v_threshold),\n",
      "            )\n",
      "            img_size = 1\n",
      "        else:\n",
      "            self.avgpool = None\n",
      "        C_in = C_in * img_size * img_size\n",
      "\n",
      "        self.fc = nn.Sequential(\n",
      "            nn.Flatten(),\n",
      "            nn.Linear(C_in, num_class * 10, bias=False),\n",
      "            NEURON[neuron](init_tau, v_threshold),\n",
      "        )\n",
      "        self.boost = nn.AvgPool1d(10, 10)\n",
      "\n",
      "    def forward(self, x):\n",
      "        if self.is_DVS_data:\n",
      "            out_spikes_counter = None\n",
      "            x = x.permute(1, 0, 2, 3, 4) # [T, N, 2, *, *]\n",
      "            for t in range(x.shape[0]):\n",
      "                out = self.spike_neuron_stem(self.conv_stem(x[t]))\n",
      "                out, _ = self.DVS_max_pool_1(out)\n",
      "                out = self.DVS_conv_1(out)\n",
      "                out, _ = self.DVS_max_pool_2(out)\n",
      "                for layer in self.layers:\n",
      "                    out, _ = layer(out)\n",
      "                if self.avgpool is not None:\n",
      "                    out = self.avgpool(out)\n",
      "                out = self.fc(out)\n",
      "                if out_spikes_counter is None:\n",
      "                    out_spikes_counter = self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "                else:\n",
      "                    out_spikes_counter += self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "            return out_spikes_counter\n",
      "        else:\n",
      "            out_spikes_counter = None\n",
      "            x = self.conv_stem(x)\n",
      "            for t in range(self.T):\n",
      "                out = self.spike_neuron_stem(x)\n",
      "                if self.is_tiny_imageNet_200:\n",
      "                    out, _ = self.ImageNet_max_pool(out)\n",
      "                    out = self.ImageNet_conv(out)\n",
      "                for layer in self.layers:\n",
      "                    out, _ = layer(out)\n",
      "                if self.avgpool is not None:\n",
      "                    out = self.avgpool(out)\n",
      "                out = self.fc(out)\n",
      "                if out_spikes_counter is None:\n",
      "                    out_spikes_counter = self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "                else:\n",
      "                    out_spikes_counter += self.boost(out.unsqueeze(1)).squeeze(1)\n",
      "            return out_spikes_counter\n",
      "\n",
      "# we can obtain a specific model architecture via the followed function\n",
      "Model_architecture = SpikingNetwork(search_space, block_ids, num_class, snn_params, dataset_name),\n",
      "where search_space='stem_channel': 64, 'block_channels': [64, 'm', 128, 128, 'm', 256, 256, 'm'], 'strides': [1, 2, 1, 1, 2, 1, 1, 2], 'use_GAP': False \n",
      "num_class=10, dataset_name=CIFAR10, \n",
      "snn_params='T': 8, 'init_tau': 2.0, 'v_threshold': 1.0, 'neuron': PLIF, 'is_DVS_data': False\n",
      "\n",
      "## other information\n",
      "NEURON = {\n",
      "  'IF': lambda tau, v_threshold: IFNode(v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\n",
      "  'LIF': lambda tau, v_threshold: LIFNode(tau=tau, v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\n",
      "  'PLIF': lambda tau, v_threshold: PLIFNode(init_tau=tau, v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\n",
      "}\n",
      "# SpikingBlockSet class is the basical building block of the network\n",
      "class SpikingBlockSet(nn.Module):\n",
      "    def __init__(self, block_id, C_in, C_out, stride, snn_params):\n",
      "        super(SpikingBlockSet, self).__init__()\n",
      "        self.block = operators[block_id](C_in, C_out, stride, snn_params)\n",
      "    def forward(self, x):\n",
      "        return self.block(x)\n",
      "```\n",
      "\n",
      "\n",
      "## Skills\n",
      "\n",
      "1. Analyze user requests and datasets in the Requirements above.\n",
      "2. Preliminaryly evaluate the generated candidate solutions to ensure they meet all constraints above. Regenerate candidate solutions if they do not meet the constraints until all constraints are satisfied.\n",
      "\n",
      "## Rules\n",
      "\n",
      "1. The generated candidate solutions must be complete and correct in format.\n",
      "2. Ensure the candidate solutions meet all constraints above. Regenerate if not satisfied until all constraints are met.\n",
      "3. The generated candidate solutions must be in the following format: Examples\n",
      "TBD layers configured as: [1,2,3,4,0].\n",
      " namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th skip_connection.\n",
      " Refer to examples above and the SearchSpace description below, specify the five TBD layers in macro-level architecture, choosing from the five TBDs: skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), SRB_k5(4). And Output python list of five non-negative integers, and please keep format same as examples above..\n",
      "\n",
      "## Workflow\n",
      "\n",
      "1. Analyze user requests and datasets in the Requirements above, understand the type, features, target variables, and related application backgrounds of the datasets.\n",
      "2. Understand model performance and hardware efficiency using Eval logs, and identify potential improvement points.\n",
      "3. Based on Knowledges and eval logs above, automatically use appropriate search strategies for neural network search, suitable optimization methods for hyperparameter optimization, and generate candidate solutions.\n",
      "4. Preliminaryly evaluate the generated candidate solutions to ensure they meet all constraints. If not, return to step 4 and regenerate candidate solutions until all constraints are satisfied.\n",
      "5. Return the generated candidate solutions to the user with specific format.\n",
      "\n",
      "## Initialization\n",
      "\n",
      "As an AutoML Candidate Generator, your task is to generate candidate sets to optimize the performance and hardware efficiency of SNNs, and must follow the Rules.\n",
      "\n",
      "Firstly, analyze user requests and datasets in the Requirements above, then evaluate model performance and hardware efficiency using Eval logs. Next, based on Knowledges and constraints information, generate candidate solutions through neural network search and hyperparameter optimization. Finally, preliminaryly evaluate the generated candidate solutions to ensure the quality of the generated content.\n"
     ]
    }
   ],
   "source": [
    "# pprint.pprint(dynamic.config_dict)\n",
    "# dynamic.task.evallog.clean_log()\n",
    "# Build the next-iteration search prompt from the previous candidate config and\n",
    "# its evaluation metrics (accuracy / spike count / fitness), so the LLM can\n",
    "# propose an improved block_ids candidate. `dynamic` is constructed in an\n",
    "# earlier cell (outside this view).\n",
    "new_prompt = dynamic.update_prompt(config_dict, metrics_dict)\n",
    "print(new_prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: # Role\\n\\nAutoML Candidate Generator\\n\\n## Profile\\n\\nThis role is specialized in generating high-quality candidate sets in the AutoML field, aimed at assisting users in creating candidate sets to optimize the performance and hardware efficiency of SNNs (Spiking Neural Networks).\\n\\n## Knowledges\\n\\nOne shot NAS is an efficient neural network architecture search method that utilizes hypernetworks and weight sharing to reduce search costs. The hypernetwork contains all candidate architectures, with weights shared among subnetworks, avoiding the need to train each architecture separately. Weight sharing allows sub networks to inherit the weights of the hypernetwork, reducing the need for training from scratch.\\nOne shot NAS typically uses evolutionary algorithms for architecture search, generating new candidate architectures through mutation and crossover operations, and selecting the optimal architecture based on performance metrics. This method is efficient, scalable, and flexible, and has been widely used in tasks such as image classification and object detection, and has achieved success in SNN fields such as AutoSNN.\\nDirect encoding, using encoding layer(s) to uniformly encode the input image. This schema is simpler, flexible, and more accurate when the simulation timestep is small, and more suitable for deep spiking neural networks\\nThe Spiking Convolution Block (SCB) derived from VGGNet comprises two convolutional layers, each followed by BatchNorm and a spiking neuron. The first layer transforms from **C_input** to **C_output** channels, while the second maintains **C_output** channels for both input and output, ensuring consistent feature mapping.\\nThe Spiking Residual Block (SRB) derived from ResNet features two convolutional layers, each followed by BatchNorm and spiking neurons. The first layer maps **C_input** to **C_output** channels, while the second retains **C_output** channels. 
A skip connection adds the input feature map to the output after the second layer, enhancing signal propagation.\\n\\n## Constraints\\n\\nModel must exceed 80% accuracy and achieve a fitness score over 85% on the test set, calculated by fitness = accuracy * (infer_spiking_numbers / 316839.057475)^-0.08, with no specific hardware constraints on inferencing.\\n\\n## Eval logs\\n\\n-epoch: 0,\\n-configurations: 'block_ids': [1, 2, 3, 4, 0],\\n-metrics: 'accuracy': 0.8766, 'spikes_numbers': 356961.4906, 'fitness': 0.8683\\n\\n\\n## Requirements\\n\\nCreate an SNN for classifying 32x32-pixel images into 10 categories. The model should feature batch normalization for enhanced generalization. The goal is to score ≥85% fitness (accuracy + spiking numbers) on the test set, proving effectiveness with small, low-res imagery. Metrics include accuracy and spiking numbers. Preprocessing involves normalization, with data augmented by random cropping and horizontal flipping.\\nExamples\\nTBD layers configured as: [1,2,3,4,0].\\n namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th skip_connection.\\n Refer to examples above and the SearchSpace description below, specify the five TBD layers in macro-level architecture, choosing from the five TBDs: skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), SRB_k5(4). 
And Output python list of five non-negative integers, and please keep format same as examples above.\\n\\n## Search Space\\n\\nEach architecture can be implementation via the followed Pytorch code:\\n\\nThis architecture can be represented by a candidate block_id list, i.e., block_ids = [op0, op1, op2, op3, op4, op5], where each TBD layer is with a specific operation such as pooling and convolution.\\n\\n```\\n#There are 5 candidate operation for each TBD layer: \\noperators = {0: 'Identity(C_in, C_out, snn_params) if stride == 1 else FactorizedReduce(C_in, C_out, snn_params)', 1: 'SpikingConvBlock(C_in, C_out, 3, stride, snn_params)', 2: 'SpikingConvBlock(C_in, C_out, 5, stride, snn_params)', 3: 'SpikingResidualBlock(C_in, C_out, 3, stride, snn_params)', 4: 'SpikingResidualBlock(C_in, C_out, 5, stride, snn_params)'}\\n\\n# SpikingNetwork class is used to represent the architecture: \\nclass SpikingNetwork(nn.Module):\\n    def __init__(self, search_space, block_ids ,num_class, snn_params, dataset_name):\\n        super(SpikingNetwork, self).__init__()\\n        # SNN parameters\\n        self.snn_params = snn_params\\n        if snn_params is not None:\\n            self.T = snn_params['T']\\n            init_tau = snn_params['init_tau']\\n            v_threshold = snn_params['v_threshold']\\n            neuron = snn_params['neuron']\\n            self.is_DVS_data = snn_params['is_DVS_data']\\n        if dataset_name is not None:\\n            self.is_tiny_imageNet_200 = dataset_name =='Tiny-ImageNet-200'\\n        else:\\n            self.is_tiny_imageNet_200 = False\\n        # build network\\n        C_stem = search_space['stem_channel']\\n        channels = search_space['block_channels']\\n        strides = search_space['strides']\\n        use_GAP = search_space['use_GAP']\\n\\n        img_size = 32\\n        C_in = 3\\n        if self.is_DVS_data:\\n            img_size = 128\\n            C_in =  2\\n        elif self.is_tiny_imageNet_200:\\n            
img_size = 64\\n\\n        # stem conv layer\\n        self.conv_stem = nn.Sequential(\\n            nn.Conv2d(C_in, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\\n            nn.BatchNorm2d(C_stem)\\n        )\\n        self.spike_neuron_stem = NEURON[neuron](init_tau, v_threshold)\\n\\n        if self.is_DVS_data:\\n            self.DVS_max_pool_1 = max_pool_k2(C_stem, C_stem, stride=2, snn_params)\\n            img_size = img_size // 2\\n\\n            self.DVS_conv_1 = nn.Sequential(\\n                nn.Conv2d(C_stem, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\\n                nn.BatchNorm2d(C_stem),\\n                NEURON[neuron](init_tau, v_threshold),\\n            )\\n            self.DVS_max_pool_2 = max_pool_k2(C_stem, C_stem, stride=2, snn_params)\\n            img_size = img_size // 2\\n        elif self.is_tiny_imageNet_200:\\n            self.ImageNet_max_pool = max_pool_k2(C_stem, C_stem, stride=2, snn_param)\\n            img_size = img_size // 2\\n            self.ImageNet_conv = nn.Sequential(\\n                nn.Conv2d(C_stem, C_stem, kernel_size=3, stride=1, padding=1, bias=False),\\n                nn.BatchNorm2d(C_stem),\\n                NEURON[neuron](init_tau, v_threshold),\\n            )\\n            \\n        # TBD blocks\\n        self.num_TBDs = 0\\n        self.layers = nn.ModuleList()\\n        C_in = C_stem\\n        for C_out, stride in zip(channels, strides):\\n            if stride == 1:\\n                block_id = block_ids[self.num_TBDs]\\n                self.layers.append(SpikingBlockSet(block_id,C_in, C_out, stride, self.snn_params))\\n                self.num_TBDs += 1\\n            elif stride == 2:\\n                if C_out == 'm':\\n                    block_name = 'max_pool_k2'\\n                    self.layers.append(max_pool_k2(C_in, C_out, stride, self.snn_params))\\n                    C_out = C_in\\n                else:\\n                    block_id = block_ids[self.num_TBDs]\\n 
                   self.layers.append(SpikingBlockSet(C_in, C_out, stride, self.snn_params))\\n                    self.num_TBDs += 1\\n                img_size = img_size // 2\\n            C_in = C_out\\n        # FC layer\\n        if use_GAP:\\n            self.avgpool = nn.Sequential(\\n                nn.AdaptiveAvgPool2d((1, 1)),\\n                NEURON[neuron](init_tau, v_threshold),\\n            )\\n            img_size = 1\\n        else:\\n            self.avgpool = None\\n        C_in = C_in * img_size * img_size\\n\\n        self.fc = nn.Sequential(\\n            nn.Flatten(),\\n            nn.Linear(C_in, num_class * 10, bias=False),\\n            NEURON[neuron](init_tau, v_threshold),\\n        )\\n        self.boost = nn.AvgPool1d(10, 10)\\n\\n    def forward(self, x):\\n        if self.is_DVS_data:\\n            out_spikes_counter = None\\n            x = x.permute(1, 0, 2, 3, 4) # [T, N, 2, *, *]\\n            for t in range(x.shape[0]):\\n                out = self.spike_neuron_stem(self.conv_stem(x[t]))\\n                out, _ = self.DVS_max_pool_1(out)\\n                out = self.DVS_conv_1(out)\\n                out, _ = self.DVS_max_pool_2(out)\\n                for layer in self.layers:\\n                    out, _ = layer(out)\\n                if self.avgpool is not None:\\n                    out = self.avgpool(out)\\n                out = self.fc(out)\\n                if out_spikes_counter is None:\\n                    out_spikes_counter = self.boost(out.unsqueeze(1)).squeeze(1)\\n                else:\\n                    out_spikes_counter += self.boost(out.unsqueeze(1)).squeeze(1)\\n            return out_spikes_counter\\n        else:\\n            out_spikes_counter = None\\n            x = self.conv_stem(x)\\n            for t in range(self.T):\\n                out = self.spike_neuron_stem(x)\\n                if self.is_tiny_imageNet_200:\\n                    out, _ = self.ImageNet_max_pool(out)\\n                    out 
= self.ImageNet_conv(out)\\n                for layer in self.layers:\\n                    out, _ = layer(out)\\n                if self.avgpool is not None:\\n                    out = self.avgpool(out)\\n                out = self.fc(out)\\n                if out_spikes_counter is None:\\n                    out_spikes_counter = self.boost(out.unsqueeze(1)).squeeze(1)\\n                else:\\n                    out_spikes_counter += self.boost(out.unsqueeze(1)).squeeze(1)\\n            return out_spikes_counter\\n\\n# we can obtain a specific model architecture via the followed function\\nModel_architecture = SpikingNetwork(search_space, block_ids, num_class, snn_params, dataset_name),\\nwhere search_space='stem_channel': 64, 'block_channels': [64, 'm', 128, 128, 'm', 256, 256, 'm'], 'strides': [1, 2, 1, 1, 2, 1, 1, 2], 'use_GAP': False \\nnum_class=10, dataset_name=CIFAR10, \\nsnn_params='T': 8, 'init_tau': 2.0, 'v_threshold': 1.0, 'neuron': PLIF, 'is_DVS_data': False\\n\\n## other information\\nNEURON = {\\n  'IF': lambda tau, v_threshold: IFNode(v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\\n  'LIF': lambda tau, v_threshold: LIFNode(tau=tau, v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\\n  'PLIF': lambda tau, v_threshold: PLIFNode(init_tau=tau, v_threshold=v_threshold, surrogate_function=surrogate.ATan(), detach_reset=True),\\n}\\n# SpikingBlockSet class is the basical building block of the network\\nclass SpikingBlockSet(nn.Module):\\n    def __init__(self, block_id, C_in, C_out, stride, snn_params):\\n        super(SpikingBlockSet, self).__init__()\\n        self.block = operators[block_id](C_in, C_out, stride, snn_params)\\n    def forward(self, x):\\n        return self.block(x)\\n```\\n\\n\\n## Skills\\n\\n1. Analyze user requests and datasets in the Requirements above.\\n2. Preliminaryly evaluate the generated candidate solutions to ensure they meet all constraints above. 
Regenerate candidate solutions if they do not meet the constraints until all constraints are satisfied.\\n\\n## Rules\\n\\n1. The generated candidate solutions must be complete and correct in format.\\n2. Ensure the candidate solutions meet all constraints above. Regenerate if not satisfied until all constraints are met.\\n3. The generated candidate solutions must be in the following format: Examples\\nTBD layers configured as: [1,2,3,4,0].\\n namely, 1st SCB_k3, 2nd SCB_k5, 3rd SRB_k3, 4th SRB_k5, 5th skip_connection.\\n Refer to examples above and the SearchSpace description below, specify the five TBD layers in macro-level architecture, choosing from the five TBDs: skip_connection(0), SCB_k3(1), SCB_k5(2), SRB_k3(3), SRB_k5(4). And Output python list of five non-negative integers, and please keep format same as examples above..\\n\\n## Workflow\\n\\n1. Analyze user requests and datasets in the Requirements above, understand the type, features, target variables, and related application backgrounds of the datasets.\\n2. Understand model performance and hardware efficiency using Eval logs, and identify potential improvement points.\\n3. Based on Knowledges and eval logs above, automatically use appropriate search strategies for neural network search, suitable optimization methods for hyperparameter optimization, and generate candidate solutions.\\n4. Preliminaryly evaluate the generated candidate solutions to ensure they meet all constraints. If not, return to step 4 and regenerate candidate solutions until all constraints are satisfied.\\n5. Return the generated candidate solutions to the user with specific format.\\n\\n## Initialization\\n\\nAs an AutoML Candidate Generator, your task is to generate candidate sets to optimize the performance and hardware efficiency of SNNs, and must follow the Rules.\\n\\nFirstly, analyze user requests and datasets in the Requirements above, then evaluate model performance and hardware efficiency using Eval logs. 
Next, based on Knowledges and constraints information, generate candidate solutions through neural network search and hyperparameter optimization. Finally, preliminaryly evaluate the generated candidate solutions to ensure the quality of the generated content.\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[llm:ChatOpenAI] [13.33s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"Based on the provided requirements, constraints, and search space, I will generate a candidate architecture for the SNN model. The goal is to achieve a fitness score of ≥85% on the test set, with accuracy exceeding 80% and optimized spiking numbers.\\n\\n### Analysis:\\n1. **Dataset**: 32x32-pixel images classified into 10 categories (e.g., CIFAR-10).\\n2. **Constraints**: \\n   - Accuracy > 80%.\\n   - Fitness score > 85%, calculated as `fitness = accuracy * (infer_spiking_numbers / 316839.057475)^-0.08`.\\n3. **Search Space**: \\n   - Five TBD layers, each chosen from: \\n     - `skip_connection (0)`, \\n     - `SCB_k3 (1)`, \\n     - `SCB_k5 (2)`, \\n     - `SRB_k3 (3)`, \\n     - `SRB_k5 (4)`.\\n4. **Eval Logs**: \\n   - Previous configuration: `[1, 2, 3, 4, 0]` achieved 87.66% accuracy, 356961.4906 spikes, and 86.83% fitness.\\n\\n### Candidate Architecture:\\nTo improve upon the previous configuration, I will prioritize architectures that balance accuracy and spiking efficiency. Based on the knowledge of SCB and SRB blocks, I propose the following candidate:\\n\\n**TBD layers configured as: [1, 3, 2, 4, 0].**\\n- **1st layer**: SCB_k3 (1) - Efficient feature extraction with a smaller kernel.\\n- **2nd layer**: SRB_k3 (3) - Residual block for deeper feature learning with a smaller kernel.\\n- **3rd layer**: SCB_k5 (2) - Broader feature extraction with a larger kernel.\\n- **4th layer**: SRB_k5 (4) - Residual block for deeper feature learning with a larger kernel.\\n- **5th layer**: skip_connection (0) - Skip connection to enhance gradient flow and reduce spiking numbers.\\n\\n### Python List Output:\\n```python\\n[1, 3, 2, 4, 0]\\n```\\n\\n### Preliminary Evaluation:\\n1. **Accuracy**: The combination of SCB and SRB blocks with varying kernel sizes should enhance feature extraction and classification accuracy, likely exceeding 80%.\\n2. 
**Spiking Numbers**: The inclusion of skip connections and residual blocks should help reduce spiking activity while maintaining performance.\\n3. **Fitness Score**: The architecture is designed to balance accuracy and spiking efficiency, aiming for a fitness score ≥85%.\\n\\nIf this candidate does not meet the constraints, I will regenerate and refine the architecture until all requirements are satisfied. Let me know if further adjustments are needed!\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"Based on the provided requirements, constraints, and search space, I will generate a candidate architecture for the SNN model. The goal is to achieve a fitness score of ≥85% on the test set, with accuracy exceeding 80% and optimized spiking numbers.\\n\\n### Analysis:\\n1. **Dataset**: 32x32-pixel images classified into 10 categories (e.g., CIFAR-10).\\n2. **Constraints**: \\n   - Accuracy > 80%.\\n   - Fitness score > 85%, calculated as `fitness = accuracy * (infer_spiking_numbers / 316839.057475)^-0.08`.\\n3. **Search Space**: \\n   - Five TBD layers, each chosen from: \\n     - `skip_connection (0)`, \\n     - `SCB_k3 (1)`, \\n     - `SCB_k5 (2)`, \\n     - `SRB_k3 (3)`, \\n     - `SRB_k5 (4)`.\\n4. **Eval Logs**: \\n   - Previous configuration: `[1, 2, 3, 4, 0]` achieved 87.66% accuracy, 356961.4906 spikes, and 86.83% fitness.\\n\\n### Candidate Architecture:\\nTo improve upon the previous configuration, I will prioritize architectures that balance accuracy and spiking efficiency. Based on the knowledge of SCB and SRB blocks, I propose the following candidate:\\n\\n**TBD layers configured as: [1, 3, 2, 4, 0].**\\n- **1st layer**: SCB_k3 (1) - Efficient feature extraction with a smaller kernel.\\n- **2nd layer**: SRB_k3 (3) - Residual block for deeper feature learning with a smaller kernel.\\n- **3rd layer**: SCB_k5 (2) - Broader feature extraction with a larger kernel.\\n- **4th layer**: SRB_k5 (4) - Residual block for deeper feature learning with a larger kernel.\\n- **5th layer**: skip_connection (0) - Skip connection to enhance gradient flow and reduce spiking numbers.\\n\\n### Python List Output:\\n```python\\n[1, 3, 2, 4, 0]\\n```\\n\\n### Preliminary Evaluation:\\n1. **Accuracy**: The combination of SCB and SRB blocks with varying kernel sizes should enhance feature extraction and classification accuracy, likely exceeding 80%.\\n2. 
**Spiking Numbers**: The inclusion of skip connections and residual blocks should help reduce spiking activity while maintaining performance.\\n3. **Fitness Score**: The architecture is designed to balance accuracy and spiking efficiency, aiming for a fitness score ≥85%.\\n\\nIf this candidate does not meet the constraints, I will regenerate and refine the architecture until all requirements are satisfied. Let me know if further adjustments are needed!\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 589,\n",
      "                \"prompt_tokens\": 3281,\n",
      "                \"total_tokens\": 3870,\n",
      "                \"completion_tokens_details\": null,\n",
      "                \"prompt_tokens_details\": null,\n",
      "                \"prompt_cache_hit_tokens\": 384,\n",
      "                \"prompt_cache_miss_tokens\": 2897\n",
      "              },\n",
      "              \"model_name\": \"deepseek-chat\",\n",
      "              \"system_fingerprint\": \"fp_3a5770e1b4\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run-af591acb-05bd-4f9a-869b-fca0bfab4cb9-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 3281,\n",
      "              \"output_tokens\": 589,\n",
      "              \"total_tokens\": 3870,\n",
      "              \"input_token_details\": {},\n",
      "              \"output_token_details\": {}\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 589,\n",
      "      \"prompt_tokens\": 3281,\n",
      "      \"total_tokens\": 3870,\n",
      "      \"completion_tokens_details\": null,\n",
      "      \"prompt_tokens_details\": null,\n",
      "      \"prompt_cache_hit_tokens\": 384,\n",
      "      \"prompt_cache_miss_tokens\": 2897\n",
      "    },\n",
      "    \"model_name\": \"deepseek-chat\",\n",
      "    \"system_fingerprint\": \"fp_3a5770e1b4\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "import langchain\n",
    "\n",
    "# Enable LangChain's global debug tracing only around this one LLM call so the\n",
    "# full prompt/response exchange is logged. Use try/finally: the original code\n",
    "# would leave langchain.debug = True for the rest of the kernel session if\n",
    "# responsellm raised (e.g. API timeout), flooding every later call with traces.\n",
    "langchain.debug = True\n",
    "try:\n",
    "    config_str2 = responsellm(prompt=new_prompt, client=client)\n",
    "finally:\n",
    "    langchain.debug = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Based on the provided requirements, constraints, and search space, I will generate a candidate architecture for the SNN model. The goal is to achieve a fitness score of ≥85% on the test set, with accuracy exceeding 80% and optimized spiking numbers.\n",
      "\n",
      "### Analysis:\n",
      "1. **Dataset**: 32x32-pixel images classified into 10 categories (e.g., CIFAR-10).\n",
      "2. **Constraints**: \n",
      "   - Accuracy > 80%.\n",
      "   - Fitness score > 85%, calculated as `fitness = accuracy * (infer_spiking_numbers / 316839.057475)^-0.08`.\n",
      "3. **Search Space**: \n",
      "   - Five TBD layers, each chosen from: \n",
      "     - `skip_connection (0)`, \n",
      "     - `SCB_k3 (1)`, \n",
      "     - `SCB_k5 (2)`, \n",
      "     - `SRB_k3 (3)`, \n",
      "     - `SRB_k5 (4)`.\n",
      "4. **Eval Logs**: \n",
      "   - Previous configuration: `[1, 2, 3, 4, 0]` achieved 87.66% accuracy, 356961.4906 spikes, and 86.83% fitness.\n",
      "\n",
      "### Candidate Architecture:\n",
      "To improve upon the previous configuration, I will prioritize architectures that balance accuracy and spiking efficiency. Based on the knowledge of SCB and SRB blocks, I propose the following candidate:\n",
      "\n",
      "**TBD layers configured as: [1, 3, 2, 4, 0].**\n",
      "- **1st layer**: SCB_k3 (1) - Efficient feature extraction with a smaller kernel.\n",
      "- **2nd layer**: SRB_k3 (3) - Residual block for deeper feature learning with a smaller kernel.\n",
      "- **3rd layer**: SCB_k5 (2) - Broader feature extraction with a larger kernel.\n",
      "- **4th layer**: SRB_k5 (4) - Residual block for deeper feature learning with a larger kernel.\n",
      "- **5th layer**: skip_connection (0) - Skip connection to enhance gradient flow and reduce spiking numbers.\n",
      "\n",
      "### Python List Output:\n",
      "```python\n",
      "[1, 3, 2, 4, 0]\n",
      "```\n",
      "\n",
      "### Preliminary Evaluation:\n",
      "1. **Accuracy**: The combination of SCB and SRB blocks with varying kernel sizes should enhance feature extraction and classification accuracy, likely exceeding 80%.\n",
      "2. **Spiking Numbers**: The inclusion of skip connections and residual blocks should help reduce spiking activity while maintaining performance.\n",
      "3. **Fitness Score**: The architecture is designed to balance accuracy and spiking efficiency, aiming for a fitness score ≥85%.\n",
      "\n",
      "If this candidate does not meet the constraints, I will regenerate and refine the architecture until all requirements are satisfied. Let me know if further adjustments are needed!\n"
     ]
    }
   ],
   "source": [
    "# Show the raw LLM response before parsing; print() (not bare display) is\n",
    "# intentional so the multi-line markdown renders readably instead of as a repr.\n",
    "print(config_str2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'block_ids': [1, 3, 2, 4, 0]}\n"
     ]
    }
   ],
   "source": [
    "# Extract the structured candidate from the LLM's free-text answer; per the\n",
    "# printed output this yields {'block_ids': [...]}. parse_config is defined in\n",
    "# an earlier cell (outside this view) — presumably it scans for the python\n",
    "# list in the response; verify against its definition.\n",
    "config_dict2 = parse_config(config_str2)\n",
    "print(config_dict2)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python312",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
