{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training_arrays_v1.py:37: UserWarning: A NumPy version >=1.23.5 and <2.3.0 is required for this version of SciPy (detected version 1.23.0)\n",
      "  from scipy.sparse import issparse  # pylint: disable=g-import-not-at-top\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import mesa\n",
    "import random\n",
    "from collections import deque\n",
    "import matplotlib.pyplot as plt\n",
    "from IPython.display import clear_output\n",
    "import tensorflow as tf\n",
    "%matplotlib inline\n",
    "def register_agent(agent_class):\n",
    "    return agent_class"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Agent"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "parameter_grass_grow_speed = 0.02\n",
    "parameter_grass_max_size = 10\n",
    "parameter_grass_born_size = 1\n",
    "\n",
    "parameter_sheep_born_size = 2\n",
    "parameter_sheep_base_energy_cost = 0.01\n",
    "parameter_sheep_move_energy_cost = 0.05\n",
    "parameter_sheep_eat_speed = 2\n",
    "parameter_sheep_observation_radius = 4\n",
    "\n",
    "parameter_sheep_born_size = 2\n",
    "parameter_sheep_base_energy_cost = 0.01\n",
    "parameter_sheep_move_energy_cost = 0.05\n",
    "parameter_sheep_eat_speed = 2\n",
    "parameter_sheep_observation_radius = 6\n",
    "\n",
    "parameter_wolf_born_size = 5\n",
    "parameter_wolf_base_energy_cost = 0.01\n",
    "parameter_wolf_move_energy_cost = 0.05\n",
    "parameter_wolf_eat_speed = 5\n",
    "parameter_wolf_observation_radius = 4\n",
    "\n",
    "parameter_wolf_born_size = 10\n",
    "parameter_wolf_base_energy_cost = 0\n",
    "parameter_wolf_move_energy_cost = 0\n",
    "parameter_wolf_eat_speed = 5\n",
    "parameter_wolf_observation_radius = 4"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Qlearning module"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Conv2D structure\n",
    "class Qlearning:\n",
    "    def __init__(self,observation_dim=(32,32,3),action_map={0: (-1, 0),1: (1, 0),2: (0, -1),3: (0, 1),4: (0, 0)},learning_rate=0.001,discount_factor=0.9,epsilon=1.0,epsilon_decay=0.995,min_epsilon=0.01):\n",
    "        self.observation_dim = observation_dim\n",
    "        self.action_map = action_map\n",
    "        self.learning_rate = learning_rate\n",
    "        self.discount_factor = discount_factor\n",
    "        self.epsilon = epsilon\n",
    "        self.epsilon_decay = epsilon_decay\n",
    "        self.min_epsilon = min_epsilon\n",
    "        self.q_network = self.learning_model()\n",
    "        self.target_network = self.learning_model()\n",
    "        self.update_target_network()\n",
    "        self.memory = deque(maxlen=2048)\n",
    "    def learning_model(self):\n",
    "        model = tf.keras.Sequential([\n",
    "            tf.keras.Input(shape=self.observation_dim),\n",
    "            tf.keras.layers.Conv2D(8,3,1,padding='SAME',activation='relu'),\n",
    "            tf.keras.layers.Conv2D(16,3,1,padding='SAME',activation='relu'),\n",
    "            tf.keras.layers.Flatten(),\n",
    "            tf.keras.layers.Dense(len(self.action_map),activation='sigmoid')\n",
    "        ])\n",
    "        model.compile(optimizer='adam', loss='mse')\n",
    "        return model\n",
    "    def choose_action(self, state):\n",
    "        if np.random.rand() < self.epsilon:\n",
    "            return random.choice(range(len(self.action_map)))\n",
    "        else:\n",
    "            q_values = self.q_network.predict(state.reshape(1, -1),verbose=0)\n",
    "            return np.argmax(q_values[0])\n",
    "    def remember(self, state, action, reward, next_state, done):\n",
    "        self.memory.append((state, action, reward, next_state, done))\n",
    "    def decay(self):\n",
    "        self.epsilon *= self.epsilon_decay\n",
    "    def replay(self, batch_size):\n",
    "        if len(self.memory) < batch_size:\n",
    "            return\n",
    "        batch = random.sample(self.memory, batch_size)\n",
    "        states = np.array([np.array(t[0], dtype=np.float32) for t in batch])\n",
    "        actions = np.array([t[1] for t in batch], dtype=np.int32)\n",
    "        rewards = np.array([t[2] for t in batch], dtype=np.float32)\n",
    "        next_states = np.array([np.array(t[3], dtype=np.float32) for t in batch])\n",
    "        dones = np.array([t[4] for t in batch], dtype=np.bool_)\n",
    "        next_q_values = self.target_network.predict(next_states, verbose=0)\n",
    "        max_next_q = np.max(next_q_values, axis=1)\n",
    "        targets = self.q_network.predict(states, verbose=0)\n",
    "        targets[np.arange(batch_size), actions] = rewards + self.discount_factor * max_next_q * (1 - dones)\n",
    "        self.q_network.fit(states, targets, epochs=1, verbose=0)\n",
    "    def update_target_network(self):\n",
    "        self.target_network.set_weights(self.q_network.get_weights())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Grass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "@register_agent\n",
    "class Grass(mesa.Agent):\n",
    "    def __init__(self,unique_id,model,energy=parameter_grass_born_size):\n",
    "        super().__init__(model)\n",
    "        self.unique_id = unique_id\n",
    "        self.energy = energy\n",
    "    def step(self):\n",
    "        if self.energy < parameter_grass_max_size:\n",
    "            self.energy += parameter_grass_grow_speed"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sheep"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "@register_agent\n",
    "class Sheep(mesa.Agent):\n",
    "    def __init__(self,unique_id,model,energy=parameter_sheep_born_size):\n",
    "        super().__init__(model)\n",
    "        self.unique_id = unique_id\n",
    "        self.energy = energy\n",
    "        self.observation_field = parameter_sheep_observation_radius*2+1\n",
    "        self.reward = 0\n",
    "        self.steps = 0\n",
    "        self.observation = 0\n",
    "        self.reward = 0\n",
    "        self.action_idx = 0\n",
    "        self.next_observation = 0\n",
    "    def update_target_network(self):\n",
    "        self.target_network.set_weights(self.q_network.get_weights())\n",
    "    def step(self):\n",
    "        self.steps += 1\n",
    "        observation = self.observe()\n",
    "        self.memory()\n",
    "        self.die()\n",
    "        self.energy_decay()\n",
    "        action = self.think(observation)\n",
    "        self.move(action)\n",
    "        self.eat()\n",
    "        self.reproduction()\n",
    "    def memory(self):\n",
    "        if self.steps>1:\n",
    "            self.model.brain.remember(self.observation,self.action_idx,self.reward,self.next_observation,self.energy<=0)\n",
    "        self.reward = 0\n",
    "    def energy_decay(self):\n",
    "        self.energy -= parameter_sheep_base_energy_cost\n",
    "        self.reward -= parameter_sheep_base_energy_cost\n",
    "    def observe(self):\n",
    "        observation = np.zeros((self.observation_field,self.observation_field,3))\n",
    "        observation[parameter_sheep_observation_radius,parameter_sheep_observation_radius,1] = self.energy\n",
    "        for neighbor in self.model.grid.get_neighbors(self.pos,moore=True,radius=parameter_sheep_observation_radius):\n",
    "            relative_pos = (np.array(neighbor.pos)-np.array(self.pos)).astype(int)\n",
    "            if isinstance(neighbor,Grass):\n",
    "                observation[parameter_sheep_observation_radius+relative_pos[0],parameter_sheep_observation_radius+relative_pos[1],0] = neighbor.energy\n",
    "            elif isinstance(neighbor,Sheep):\n",
    "                observation[parameter_sheep_observation_radius+relative_pos[0],parameter_sheep_observation_radius+relative_pos[1],1] = neighbor.energy\n",
    "            elif isinstance(neighbor,Wolf):\n",
    "                observation[parameter_sheep_observation_radius+relative_pos[0],parameter_sheep_observation_radius+relative_pos[1],2] = neighbor.energy\n",
    "        for i in range(self.observation_field):\n",
    "            for j in range(self.observation_field):\n",
    "                pos = np.array(self.pos)+np.array([i,j])-np.array([parameter_sheep_observation_radius,parameter_sheep_observation_radius])\n",
    "                if self.model.grid.out_of_bounds(pos):\n",
    "                    observation[i,j] = -1\n",
    "        #observation = (observation - self.model.observation_stats['mean'])/(self.model.observation_stats['std'] + 1e-8)\n",
    "        self.observation = self.next_observation*1\n",
    "        self.next_observation = observation*1\n",
    "        return observation\n",
    "    def think(self,observation):\n",
    "        action_idx = self.model.brain.choose_action(observation)\n",
    "        action = self.model.brain.action_map[action_idx]\n",
    "        self.action_idx = action_idx\n",
    "        return action\n",
    "    def move(self,action):\n",
    "        action = np.array(action)\n",
    "        if np.sum(abs(action))>0:\n",
    "            self.energy -= parameter_sheep_move_energy_cost\n",
    "            self.reward -= parameter_sheep_move_energy_cost\n",
    "        new_pos = (\n",
    "            self.pos[0] + action[0],\n",
    "            self.pos[1] + action[1]\n",
    "        )\n",
    "        if self.model.grid.out_of_bounds(new_pos):\n",
    "            new_pos = self.pos\n",
    "        else:\n",
    "            self.model.grid.move_agent(self, new_pos)\n",
    "    def eat(self):\n",
    "        cell_contents = self.model.grid.get_cell_list_contents([self.pos])\n",
    "        for i in cell_contents:\n",
    "            if isinstance(i,Grass):\n",
    "                eat_amount = min(parameter_sheep_eat_speed,i.energy)\n",
    "                self.energy += eat_amount\n",
    "                self.reward += eat_amount\n",
    "                i.energy -= eat_amount\n",
    "    def reproduction(self):\n",
    "        #if self.energy >= parameter_sheep_born_size*2:\n",
    "        #    self.energy -= parameter_sheep_born_size\n",
    "        #    self.model.add_sheep()\n",
    "        return\n",
    "    def die(self):\n",
    "        if self.energy <= 0:\n",
    "            self.model.remove_agent(self)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Wolf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "@register_agent\n",
    "class Wolf(mesa.Agent):\n",
    "    def __init__(self,unique_id,model,energy=parameter_wolf_born_size):\n",
    "        super().__init__(model)\n",
    "        self.unique_id = unique_id\n",
    "        self.speed = 1.5\n",
    "        self.energy = parameter_wolf_born_size\n",
    "    def random_move(self):\n",
    "        if np.random.rand()<0.5:\n",
    "            possible_moves = self.model.grid.get_neighborhood(self.pos,moore=True,include_center=False)\n",
    "            new_pos = random.choice(possible_moves)\n",
    "            self.model.grid.move_agent(self,new_pos)\n",
    "        else:\n",
    "            pass\n",
    "    def follow_move(self,ratio=0.75):\n",
    "        if np.random.rand()>ratio:\n",
    "            return\n",
    "        target = self.pos\n",
    "        min_distance = float('inf')\n",
    "        for agent in self.model.agents:\n",
    "            if isinstance(agent,Sheep):\n",
    "                distance = self.model.space.get_distance(self.pos,agent.pos)\n",
    "                if distance < min_distance:\n",
    "                    min_distance = distance\n",
    "                    target = agent.pos\n",
    "        if self.pos[0] < target[0]:\n",
    "            return [(self.pos[0]+1, self.pos[1])]\n",
    "        elif self.pos[0] > target[0]:\n",
    "            return [(self.pos[0]-1, self.pos[1])]\n",
    "        elif self.pos[1] < target[1]:\n",
    "            return [(self.pos[0], self.pos[1]+1)]\n",
    "        elif self.pos[1] > target[1]:\n",
    "            return [(self.pos[0], self.pos[1]-1)]\n",
    "    def step(self):\n",
    "        self.random_move()\n",
    "        cell_contents = self.model.grid.get_cell_list_contents([self.pos])\n",
    "        for agent in cell_contents:\n",
    "            if isinstance(agent, Sheep):\n",
    "                eat_amount = min(parameter_wolf_eat_speed, agent.energy)\n",
    "                #self.energy += eat_amount\n",
    "                agent.energy -= eat_amount\n",
    "                agent.reward -= eat_amount"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## World"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class World(mesa.Model):\n",
    "    def __init__(self,brain,width=25,height=25,seed=None):\n",
    "        super().__init__(seed=seed)  \n",
    "        self.width = width\n",
    "        self.height = height\n",
    "        self.brain = brain\n",
    "        self.grid = mesa.space.MultiGrid(width,height,torus=False)\n",
    "        self.history = {'number':[],'energy':[]}\n",
    "        self.steps = 0\n",
    "    def add_agent(self,f):\n",
    "        x = random.randrange(self.width)\n",
    "        y = random.randrange(self.height)\n",
    "        agent = f((x,y),self)\n",
    "        self.grid.place_agent(agent,(x,y))\n",
    "    def remove_agent(self,agent):\n",
    "        self.agents.remove(agent)\n",
    "    def step(self):\n",
    "        self.agents.shuffle_do(\"step\")\n",
    "        self.brain.replay(batch_size=32)\n",
    "        if (self.steps+1) % 10 == 0:\n",
    "            self.brain.update_target_network()\n",
    "        self.brain.decay()\n",
    "    def dynamic_visualization(self,steps=100,interval=0.1):\n",
    "        figure = plt.figure(figsize=(8,6))\n",
    "        plt.rcParams['font.family'] = 'Segoe UI Emoji'\n",
    "        for step in range(steps):\n",
    "            self.steps += 1\n",
    "            self.step()\n",
    "            plt.clf()\n",
    "            ax = plt.gca()\n",
    "            objects = [[],[],[],[]]\n",
    "            self.history['number'].append([0,0,0])\n",
    "            self.history['energy'].append([[],[],[]])\n",
    "            for agent in self.agents:\n",
    "                objects[0].append(agent.pos)\n",
    "                objects[1].append(agent.energy)\n",
    "                if isinstance(agent,Grass):\n",
    "                    objects[2].append([0,1,0])\n",
    "                    objects[3].append('🌿')\n",
    "                    self.history['number'][-1][0] += 1\n",
    "                    self.history['energy'][-1][0].append(agent.energy)\n",
    "                elif isinstance(agent,Sheep):\n",
    "                    objects[2].append([0,0,1])\n",
    "                    objects[3].append('🐑')\n",
    "                    self.history['number'][-1][1] += 1\n",
    "                    self.history['energy'][-1][1].append(agent.energy)\n",
    "                elif isinstance(agent,Wolf):\n",
    "                    objects[2].append([1,0,0])\n",
    "                    objects[3].append('🐺')\n",
    "                    self.history['number'][-1][2] += 1\n",
    "                    self.history['energy'][-1][2].append(agent.energy)\n",
    "            objects[0] = np.array(objects[0])\n",
    "            #plt.scatter(objects[0][:,0],objects[0][:,1],s=objects[1],c=objects[2],alpha=0.6)\n",
    "            for i,j,k in zip(objects[0],objects[3],objects[1]):\n",
    "                plt.text(i[0],i[1],j,ha='center',va='center',fontsize=k)\n",
    "            ax.set_xlim(0,self.width)\n",
    "            ax.set_ylim(0,self.height)\n",
    "            plt.title(f\"Ecosystem Simulation - Step {step}\")\n",
    "            plt.xlabel(\"X Position\")\n",
    "            plt.ylabel(\"Y Position\")\n",
    "            plt.pause(interval)\n",
    "            clear_output(wait=True)\n",
    "        plt.ioff()\n",
    "        plt.show()\n",
    "        return self.history"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Simulation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "n_grass = 100\n",
    "n_sheep = 50\n",
    "n_wolf = 10\n",
    "seed = 0\n",
    "tmp = (parameter_sheep_observation_radius*2+1)\n",
    "field = (tmp,tmp,3)\n",
    "brain = Qlearning(field)\n",
    "world = World(brain,25,25)\n",
    "for _ in range(n_grass):\n",
    "    world.add_agent(Grass)\n",
    "for _ in range(n_sheep):\n",
    "    world.add_agent(Sheep)\n",
    "for _ in range(n_wolf):\n",
    "    world.add_agent(Wolf)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "ename": "InvalidArgumentError",
     "evalue": "Graph execution error:\n\nDetected at node 'sequential/conv2d/Relu' defined at (most recent call last):\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\runpy.py\", line 196, in _run_module_as_main\n      return _run_code(code, main_globals, None,\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\runpy.py\", line 86, in _run_code\n      exec(code, run_globals)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel_launcher.py\", line 18, in <module>\n      app.launch_new_instance()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\traitlets\\config\\application.py\", line 1075, in launch_instance\n      app.start()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelapp.py\", line 739, in start\n      self.io_loop.start()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\tornado\\platform\\asyncio.py\", line 205, in start\n      self.asyncio_loop.run_forever()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\asyncio\\base_events.py\", line 603, in run_forever\n      self._run_once()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\asyncio\\base_events.py\", line 1909, in _run_once\n      handle._run()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\asyncio\\events.py\", line 80, in _run\n      self._context.run(self._callback, *self._args)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 545, in dispatch_queue\n      await self.process_one()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 534, in process_one\n      await dispatch(*args)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 437, in dispatch_shell\n      await result\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 362, in execute_request\n      await super().execute_request(stream, ident, parent)\n    
File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 778, in execute_request\n      reply_content = await reply_content\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 449, in do_execute\n      res = shell.run_cell(\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\zmqshell.py\", line 549, in run_cell\n      return super().run_cell(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3075, in run_cell\n      result = self._run_cell(\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3130, in _run_cell\n      result = runner(coro)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\async_helpers.py\", line 128, in _pseudo_sync_runner\n      coro.send(None)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3334, in run_cell_async\n      has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3517, in run_ast_nodes\n      if await self.run_code(code, result, async_=asy):\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3577, in run_code\n      exec(code_obj, self.user_global_ns, self.user_ns)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\515241312.py\", line 1, in <module>\n      history = world.dynamic_visualization(steps=10000,interval=0.01)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\1862732892.py\", line 28, in dynamic_visualization\n      self.step()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\mesa\\model.py\", line 113, in _wrapped_step\n      self._user_step(*args, **kwargs)\n    File 
\"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\1862732892.py\", line 18, in step\n      self.agents.shuffle_do(\"step\")\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\mesa\\agent.py\", line 322, in shuffle_do\n      getattr(agent, method)(*args, **kwargs)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\494311869.py\", line 22, in step\n      action = self.think(observation)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\494311869.py\", line 54, in think\n      action_idx = self.model.brain.choose_action(observation)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\3374292324.py\", line 29, in choose_action\n      q_values = self.q_network.predict(state.reshape(1, -1),verbose=0)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2253, in predict\n      tmp_batch_outputs = self.predict_function(iterator)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2041, in predict_function\n      return step_function(self, iterator)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2027, in step_function\n      outputs = model.distribute_strategy.run(run_step, args=(data,))\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2015, in run_step\n      outputs = model.predict_step(data)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 1983, in predict_step\n      return self(x, training=False)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File 
\"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 557, in __call__\n      return super().__call__(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\base_layer.py\", line 1097, in __call__\n      outputs = call_fn(inputs, *args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 96, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\sequential.py\", line 410, in call\n      return super().call(inputs, training=training, mask=mask)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\functional.py\", line 510, in call\n      return self._run_internal_graph(inputs, training=training, mask=mask)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\functional.py\", line 667, in _run_internal_graph\n      outputs = node.layer(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\base_layer.py\", line 1097, in __call__\n      outputs = call_fn(inputs, *args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 96, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\layers\\convolutional\\base_conv.py\", line 314, in call\n      return self.activation(outputs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\activations.py\", line 317, in relu\n      return 
backend.relu(\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\backend.py\", line 5366, in relu\n      x = tf.nn.relu(x)\nNode: 'sequential/conv2d/Relu'\ninput must be 4-dimensional[1,507]\n\t [[{{node sequential/conv2d/Relu}}]] [Op:__inference_predict_function_1276]",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mInvalidArgumentError\u001b[0m                      Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[10], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m history \u001b[38;5;241m=\u001b[39m \u001b[43mworld\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdynamic_visualization\u001b[49m\u001b[43m(\u001b[49m\u001b[43msteps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10000\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43minterval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.01\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "Cell \u001b[1;32mIn[7], line 28\u001b[0m, in \u001b[0;36mWorld.dynamic_visualization\u001b[1;34m(self, steps, interval)\u001b[0m\n\u001b[0;32m     26\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(steps):\n\u001b[0;32m     27\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m---> 28\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstep\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     29\u001b[0m     plt\u001b[38;5;241m.\u001b[39mclf()\n\u001b[0;32m     30\u001b[0m     ax \u001b[38;5;241m=\u001b[39m plt\u001b[38;5;241m.\u001b[39mgca()\n",
      "File \u001b[1;32md:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\mesa\\model.py:113\u001b[0m, in \u001b[0;36mModel._wrapped_step\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m    111\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m    112\u001b[0m \u001b[38;5;66;03m# Call the original user-defined step method\u001b[39;00m\n\u001b[1;32m--> 113\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_user_step(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "Cell \u001b[1;32mIn[7], line 18\u001b[0m, in \u001b[0;36mWorld.step\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m     17\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mstep\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m---> 18\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43magents\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mshuffle_do\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mstep\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m     19\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbrain\u001b[38;5;241m.\u001b[39mreplay(batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m32\u001b[39m)\n\u001b[0;32m     20\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps\u001b[38;5;241m+\u001b[39m\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m%\u001b[39m \u001b[38;5;241m10\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
      "File \u001b[1;32md:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\mesa\\agent.py:322\u001b[0m, in \u001b[0;36mAgentSet.shuffle_do\u001b[1;34m(self, method, *args, **kwargs)\u001b[0m\n\u001b[0;32m    320\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m ref \u001b[38;5;129;01min\u001b[39;00m weakrefs:\n\u001b[0;32m    321\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m (agent \u001b[38;5;241m:=\u001b[39m ref()) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m--> 322\u001b[0m             \u001b[38;5;28mgetattr\u001b[39m(agent, method)(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    323\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    324\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m ref \u001b[38;5;129;01min\u001b[39;00m weakrefs:\n",
      "Cell \u001b[1;32mIn[5], line 22\u001b[0m, in \u001b[0;36mSheep.step\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m     20\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdie()\n\u001b[0;32m     21\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39menergy_decay()\n\u001b[1;32m---> 22\u001b[0m action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mthink\u001b[49m\u001b[43m(\u001b[49m\u001b[43mobservation\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     23\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmove(action)\n\u001b[0;32m     24\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39meat()\n",
      "Cell \u001b[1;32mIn[5], line 54\u001b[0m, in \u001b[0;36mSheep.think\u001b[1;34m(self, observation)\u001b[0m\n\u001b[0;32m     53\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mthink\u001b[39m(\u001b[38;5;28mself\u001b[39m,observation):\n\u001b[1;32m---> 54\u001b[0m     action_idx \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbrain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchoose_action\u001b[49m\u001b[43m(\u001b[49m\u001b[43mobservation\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     55\u001b[0m     action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel\u001b[38;5;241m.\u001b[39mbrain\u001b[38;5;241m.\u001b[39maction_map[action_idx]\n\u001b[0;32m     56\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maction_idx \u001b[38;5;241m=\u001b[39m action_idx\n",
      "Cell \u001b[1;32mIn[3], line 29\u001b[0m, in \u001b[0;36mQlearning.choose_action\u001b[1;34m(self, state)\u001b[0m\n\u001b[0;32m     27\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m random\u001b[38;5;241m.\u001b[39mchoice(\u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maction_map)))\n\u001b[0;32m     28\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m---> 29\u001b[0m     q_values \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mq_network\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpredict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstate\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreshape\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m     30\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m np\u001b[38;5;241m.\u001b[39margmax(q_values[\u001b[38;5;241m0\u001b[39m])\n",
      "File \u001b[1;32md:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py:70\u001b[0m, in \u001b[0;36mfilter_traceback.<locals>.error_handler\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m     67\u001b[0m     filtered_tb \u001b[38;5;241m=\u001b[39m _process_traceback_frames(e\u001b[38;5;241m.\u001b[39m__traceback__)\n\u001b[0;32m     68\u001b[0m     \u001b[38;5;66;03m# To get the full stack trace, call:\u001b[39;00m\n\u001b[0;32m     69\u001b[0m     \u001b[38;5;66;03m# `tf.debugging.disable_traceback_filtering()`\u001b[39;00m\n\u001b[1;32m---> 70\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m e\u001b[38;5;241m.\u001b[39mwith_traceback(filtered_tb) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m     71\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m     72\u001b[0m     \u001b[38;5;28;01mdel\u001b[39;00m filtered_tb\n",
      "File \u001b[1;32md:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py:54\u001b[0m, in \u001b[0;36mquick_execute\u001b[1;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[0;32m     52\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m     53\u001b[0m   ctx\u001b[38;5;241m.\u001b[39mensure_initialized()\n\u001b[1;32m---> 54\u001b[0m   tensors \u001b[38;5;241m=\u001b[39m pywrap_tfe\u001b[38;5;241m.\u001b[39mTFE_Py_Execute(ctx\u001b[38;5;241m.\u001b[39m_handle, device_name, op_name,\n\u001b[0;32m     55\u001b[0m                                       inputs, attrs, num_outputs)\n\u001b[0;32m     56\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m core\u001b[38;5;241m.\u001b[39m_NotOkStatusException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m     57\u001b[0m   \u001b[38;5;28;01mif\u001b[39;00m name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
      "\u001b[1;31mInvalidArgumentError\u001b[0m: Graph execution error:\n\nDetected at node 'sequential/conv2d/Relu' defined at (most recent call last):\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\runpy.py\", line 196, in _run_module_as_main\n      return _run_code(code, main_globals, None,\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\runpy.py\", line 86, in _run_code\n      exec(code, run_globals)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel_launcher.py\", line 18, in <module>\n      app.launch_new_instance()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\traitlets\\config\\application.py\", line 1075, in launch_instance\n      app.start()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelapp.py\", line 739, in start\n      self.io_loop.start()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\tornado\\platform\\asyncio.py\", line 205, in start\n      self.asyncio_loop.run_forever()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\asyncio\\base_events.py\", line 603, in run_forever\n      self._run_once()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\asyncio\\base_events.py\", line 1909, in _run_once\n      handle._run()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\asyncio\\events.py\", line 80, in _run\n      self._context.run(self._callback, *self._args)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 545, in dispatch_queue\n      await self.process_one()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 534, in process_one\n      await dispatch(*args)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 437, in dispatch_shell\n      await result\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 362, in execute_request\n      await 
super().execute_request(stream, ident, parent)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\kernelbase.py\", line 778, in execute_request\n      reply_content = await reply_content\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\ipkernel.py\", line 449, in do_execute\n      res = shell.run_cell(\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\ipykernel\\zmqshell.py\", line 549, in run_cell\n      return super().run_cell(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3075, in run_cell\n      result = self._run_cell(\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3130, in _run_cell\n      result = runner(coro)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\async_helpers.py\", line 128, in _pseudo_sync_runner\n      coro.send(None)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3334, in run_cell_async\n      has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3517, in run_ast_nodes\n      if await self.run_code(code, result, async_=asy):\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 3577, in run_code\n      exec(code_obj, self.user_global_ns, self.user_ns)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\515241312.py\", line 1, in <module>\n      history = world.dynamic_visualization(steps=10000,interval=0.01)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\1862732892.py\", line 28, in dynamic_visualization\n      self.step()\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\mesa\\model.py\", line 113, in _wrapped_step\n      
self._user_step(*args, **kwargs)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\1862732892.py\", line 18, in step\n      self.agents.shuffle_do(\"step\")\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\mesa\\agent.py\", line 322, in shuffle_do\n      getattr(agent, method)(*args, **kwargs)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\494311869.py\", line 22, in step\n      action = self.think(observation)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\494311869.py\", line 54, in think\n      action_idx = self.model.brain.choose_action(observation)\n    File \"C:\\Users\\陈旭\\AppData\\Local\\Temp\\ipykernel_56024\\3374292324.py\", line 29, in choose_action\n      q_values = self.q_network.predict(state.reshape(1, -1),verbose=0)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2253, in predict\n      tmp_batch_outputs = self.predict_function(iterator)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2041, in predict_function\n      return step_function(self, iterator)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2027, in step_function\n      outputs = model.distribute_strategy.run(run_step, args=(data,))\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 2015, in run_step\n      outputs = model.predict_step(data)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 1983, in predict_step\n      return self(x, training=False)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, 
**kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\training.py\", line 557, in __call__\n      return super().__call__(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\base_layer.py\", line 1097, in __call__\n      outputs = call_fn(inputs, *args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 96, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\sequential.py\", line 410, in call\n      return super().call(inputs, training=training, mask=mask)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\functional.py\", line 510, in call\n      return self._run_internal_graph(inputs, training=training, mask=mask)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\functional.py\", line 667, in _run_internal_graph\n      outputs = node.layer(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 65, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\engine\\base_layer.py\", line 1097, in __call__\n      outputs = call_fn(inputs, *args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\utils\\traceback_utils.py\", line 96, in error_handler\n      return fn(*args, **kwargs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\layers\\convolutional\\base_conv.py\", line 314, in call\n      return self.activation(outputs)\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\activations.py\", line 317, in relu\n     
 return backend.relu(\n    File \"d:\\Anacoonda\\envs\\CX_universe\\lib\\site-packages\\keras\\backend.py\", line 5366, in relu\n      x = tf.nn.relu(x)\nNode: 'sequential/conv2d/Relu'\ninput must be 4-dimensional[1,507]\n\t [[{{node sequential/conv2d/Relu}}]] [Op:__inference_predict_function_1276]"
     ]
    }
   ],
   "source": [
    "history = world.dynamic_visualization(steps=10000,interval=0.01)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "CX_universe",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
