<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.10.0" />
<title>tinytroupe.factory API documentation</title>
<meta name="description" content="" />
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > 
span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
</head>
<body>
<main>
<article id="content">
<header>
<h1 class="title">Module <code>tinytroupe.factory</code></h1>
</header>
<section id="section-intro">
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">import os
import json
import chevron
import logging
import copy
logger = logging.getLogger(&#34;tinytroupe&#34;)

from tinytroupe import openai_utils
from tinytroupe.agent import TinyPerson
import tinytroupe.utils as utils
from tinytroupe.control import transactional

class TinyFactory:
    &#34;&#34;&#34;
    A base class for various types of factories. This is important because it makes it easier to extend the system, particularly 
    regarding transaction caching.
    &#34;&#34;&#34;

    # A dict of all factories created so far.
    all_factories = {} # name -&gt; factories
    
    def __init__(self, simulation_id:str=None) -&gt; None:
        &#34;&#34;&#34;
        Initialize a TinyFactory instance.

        Args:
            simulation_id (str, optional): The ID of the simulation. Defaults to None.
        &#34;&#34;&#34;
        self.name = f&#34;Factory {utils.fresh_id()}&#34; # we need a name, but no point in making it customizable
        self.simulation_id = simulation_id

        TinyFactory.add_factory(self)
    
    def __repr__(self):
        return f&#34;TinyFactory(name=&#39;{self.name}&#39;)&#34;
    
    @staticmethod
    def set_simulation_for_free_factories(simulation):
        &#34;&#34;&#34;
        Sets the simulation if it is None. This allows free factories to be captured by specific simulation scopes
        if desired.
        &#34;&#34;&#34;
        for factory in TinyFactory.all_factories.values():
            if factory.simulation_id is None:
                simulation.add_factory(factory)

    @staticmethod
    def add_factory(factory):
        &#34;&#34;&#34;
        Adds a factory to the list of all factories. Factory names must be unique,
        so if a factory with the same name already exists, an error is raised.
        &#34;&#34;&#34;
        if factory.name in TinyFactory.all_factories:
            raise ValueError(f&#34;Factory names must be unique, but &#39;{factory.name}&#39; is already defined.&#34;)
        else:
            TinyFactory.all_factories[factory.name] = factory
    
    @staticmethod
    def clear_factories():
        &#34;&#34;&#34;
        Clears the global list of all factories.
        &#34;&#34;&#34;
        TinyFactory.all_factories = {}

    ################################################################################################
    # Caching mechanisms
    #
    # Factories can also be cached in a transactional way. This is necessary because the agents they
    # generate can be cached, and we need to ensure that the factory itself is also cached in a 
    # consistent way.
    ################################################################################################

    def encode_complete_state(self) -&gt; dict:
        &#34;&#34;&#34;
        Encodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.
        &#34;&#34;&#34;

        state = copy.deepcopy(self.__dict__)
        return state

    def decode_complete_state(self, state:dict):
        &#34;&#34;&#34;
        Decodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.
        &#34;&#34;&#34;
        state = copy.deepcopy(state)

        self.__dict__.update(state)
        return self
 

class TinyPersonFactory(TinyFactory):

    def __init__(self, context_text, simulation_id:str=None):
        &#34;&#34;&#34;
        Initialize a TinyPersonFactory instance.

        Args:
            context_text (str): The context text used to generate the TinyPerson instances.
            simulation_id (str, optional): The ID of the simulation. Defaults to None.
        &#34;&#34;&#34;
        super().__init__(simulation_id)
        self.person_prompt_template_path = os.path.join(os.path.dirname(__file__), &#39;prompts/generate_person.mustache&#39;)
        self.context_text = context_text
        self.generated_minibios = [] # keep track of the generated persons. We keep the minibio to avoid generating the same person twice.
        self.generated_names = []

    @staticmethod
    def generate_person_factories(number_of_factories, generic_context_text):
        &#34;&#34;&#34;
        Generate a list of TinyPersonFactory instances using OpenAI&#39;s LLM.

        Args:
            number_of_factories (int): The number of TinyPersonFactory instances to generate.
            generic_context_text (str): The generic context text used to generate the TinyPersonFactory instances.

        Returns:
            list: A list of TinyPersonFactory instances.
        &#34;&#34;&#34;
        
        logger.info(f&#34;Starting the generation of the {number_of_factories} person factories based on that context: {generic_context_text}&#34;)
        
        system_prompt = open(os.path.join(os.path.dirname(__file__), &#39;prompts/generate_person_factory.md&#39;)).read()

        messages = []
        messages.append({&#34;role&#34;: &#34;system&#34;, &#34;content&#34;: system_prompt})

        user_prompt = chevron.render(&#34;Please, create {{number_of_factories}} person descriptions based on the following broad context: {{context}}&#34;, {
            &#34;number_of_factories&#34;: number_of_factories,
            &#34;context&#34;: generic_context_text
        })

        messages.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_prompt})

        response = openai_utils.client().send_message(messages)

        if response is not None:
            result = utils.extract_json(response[&#34;content&#34;])

            factories = []
            for i in range(number_of_factories):
                logger.debug(f&#34;Generating person factory with description: {result[i]}&#34;)
                factories.append(TinyPersonFactory(result[i]))

            return factories

        return None

    def generate_person(self, agent_particularities:str=None, temperature:float=1.5, attepmpts:int=5):
        &#34;&#34;&#34;
        Generate a TinyPerson instance using OpenAI&#39;s LLM.

        Args:
            agent_particularities (str): The particularities of the agent.
            temperature (float): The temperature to use when sampling from the LLM.

        Returns:
            TinyPerson: A TinyPerson instance generated using the LLM.
        &#34;&#34;&#34;

        logger.info(f&#34;Starting the person generation based on that context: {self.context_text}&#34;)

        prompt = chevron.render(open(self.person_prompt_template_path).read(), {
            &#34;context&#34;: self.context_text,
            &#34;agent_particularities&#34;: agent_particularities,
            &#34;already_generated&#34;: [minibio for minibio in self.generated_minibios]
        })

        def aux_generate():

            messages = []
            messages += [{&#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;You are a system that generates specifications of artificial entities.&#34;},
                        {&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: prompt}]

            # due to a technicality, we need to call an auxiliary method to be able to use the transactional decorator.
            message = self._aux_model_call(messages=messages, temperature=temperature)

            if message is not None:
                result = utils.extract_json(message[&#34;content&#34;])

                logger.debug(f&#34;Generated person parameters:\n{json.dumps(result, indent=4, sort_keys=True)}&#34;)

                # only accept the generated spec if the name is not already in the generated names, because they must be unique.
                if result[&#34;name&#34;].lower() not in self.generated_names:
                    return result

            return None # no suitable agent was generated
        
        agent_spec = None
        attempt = 0
        while agent_spec is None and attempt &lt; attepmpts:
            try:
                attempt += 1
                agent_spec = aux_generate()
            except Exception as e:
                logger.error(f&#34;Error while generating agent specification: {e}&#34;)
        
        # create the fresh agent
        if agent_spec is not None:
            # the agent is created here. This is why the present method cannot be cached. Instead, an auxiliary method is used
            # for the actual model call, so that it gets cached properly without skipping the agent creation.
            person = TinyPerson(agent_spec[&#34;name&#34;])
            self._setup_agent(person, agent_spec[&#34;_configuration&#34;])
            self.generated_minibios.append(person.minibio())
            self.generated_names.append(person.get(&#34;name&#34;).lower())
            return person
        else:
            logger.error(f&#34;Could not generate an agent after {attepmpts} attempts.&#34;)
            return None
        
    
    @transactional
    def _aux_model_call(self, messages, temperature):
        &#34;&#34;&#34;
        Auxiliary method to make a model call. This is needed in order to be able to use the transactional decorator,
        due to a technicality - otherwise, the agent creation would be skipped during cache reutilization, and
        we don&#39;t want that.
        &#34;&#34;&#34;
        return openai_utils.client().send_message(messages, temperature=temperature)
    
    @transactional
    def _setup_agent(self, agent, configuration):
        &#34;&#34;&#34;
        Sets up the agent with the necessary elements.
        &#34;&#34;&#34;
        for key, value in configuration.items():
            if isinstance(value, list):
                agent.define_several(key, value)
            else:
                agent.define(key, value)
        
        # does not return anything, as we don&#39;t want to cache the agent object itself.
    </code></pre>
</details>
</section>
<section>
</section>
<section>
</section>
<section>
</section>
<section>
<h2 class="section-title" id="header-classes">Classes</h2>
<dl>
<dt id="tinytroupe.factory.TinyFactory"><code class="flex name class">
<span>class <span class="ident">TinyFactory</span></span>
<span>(</span><span>simulation_id: str = None)</span>
</code></dt>
<dd>
<div class="desc"><p>A base class for various types of factories. This is important because it makes it easier to extend the system, particularly
regarding transaction caching.</p>
<p>Initialize a TinyFactory instance.</p>
<h2 id="args">Args</h2>
<dl>
<dt><strong><code>simulation_id</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>The ID of the simulation. Defaults to None.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class TinyFactory:
    &#34;&#34;&#34;
    A base class for various types of factories. This is important because it makes it easier to extend the system, particularly 
    regarding transaction caching.
    &#34;&#34;&#34;

    # A dict of all factories created so far.
    all_factories = {} # name -&gt; factories
    
    def __init__(self, simulation_id:str=None) -&gt; None:
        &#34;&#34;&#34;
        Initialize a TinyFactory instance.

        Args:
            simulation_id (str, optional): The ID of the simulation. Defaults to None.
        &#34;&#34;&#34;
        self.name = f&#34;Factory {utils.fresh_id()}&#34; # we need a name, but no point in making it customizable
        self.simulation_id = simulation_id

        TinyFactory.add_factory(self)
    
    def __repr__(self):
        return f&#34;TinyFactory(name=&#39;{self.name}&#39;)&#34;
    
    @staticmethod
    def set_simulation_for_free_factories(simulation):
        &#34;&#34;&#34;
        Sets the simulation if it is None. This allows free factories to be captured by specific simulation scopes
        if desired.
        &#34;&#34;&#34;
        for factory in TinyFactory.all_factories.values():
            if factory.simulation_id is None:
                simulation.add_factory(factory)

    @staticmethod
    def add_factory(factory):
        &#34;&#34;&#34;
        Adds a factory to the list of all factories. Factory names must be unique,
        so if a factory with the same name already exists, an error is raised.
        &#34;&#34;&#34;
        if factory.name in TinyFactory.all_factories:
            raise ValueError(f&#34;Factory names must be unique, but &#39;{factory.name}&#39; is already defined.&#34;)
        else:
            TinyFactory.all_factories[factory.name] = factory
    
    @staticmethod
    def clear_factories():
        &#34;&#34;&#34;
        Clears the global list of all factories.
        &#34;&#34;&#34;
        TinyFactory.all_factories = {}

    ################################################################################################
    # Caching mechanisms
    #
    # Factories can also be cached in a transactional way. This is necessary because the agents they
    # generate can be cached, and we need to ensure that the factory itself is also cached in a 
    # consistent way.
    ################################################################################################

    def encode_complete_state(self) -&gt; dict:
        &#34;&#34;&#34;
        Encodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.
        &#34;&#34;&#34;

        state = copy.deepcopy(self.__dict__)
        return state

    def decode_complete_state(self, state:dict):
        &#34;&#34;&#34;
        Decodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.
        &#34;&#34;&#34;
        state = copy.deepcopy(state)

        self.__dict__.update(state)
        return self</code></pre>
</details>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="tinytroupe.factory.TinyPersonFactory" href="#tinytroupe.factory.TinyPersonFactory">TinyPersonFactory</a></li>
</ul>
<h3>Class variables</h3>
<dl>
<dt id="tinytroupe.factory.TinyFactory.all_factories"><code class="name">var <span class="ident">all_factories</span></code></dt>
<dd>
<div class="desc"></div>
</dd>
</dl>
<h3>Static methods</h3>
<dl>
<dt id="tinytroupe.factory.TinyFactory.add_factory"><code class="name flex">
<span>def <span class="ident">add_factory</span></span>(<span>factory)</span>
</code></dt>
<dd>
<div class="desc"><p>Adds a factory to the list of all factories. Factory names must be unique,
so if a factory with the same name already exists, an error is raised.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@staticmethod
def add_factory(factory):
    &#34;&#34;&#34;
    Adds a factory to the list of all factories. Factory names must be unique,
    so if a factory with the same name already exists, an error is raised.
    &#34;&#34;&#34;
    if factory.name in TinyFactory.all_factories:
        raise ValueError(f&#34;Factory names must be unique, but &#39;{factory.name}&#39; is already defined.&#34;)
    else:
        TinyFactory.all_factories[factory.name] = factory</code></pre>
</details>
</dd>
<dt id="tinytroupe.factory.TinyFactory.clear_factories"><code class="name flex">
<span>def <span class="ident">clear_factories</span></span>(<span>)</span>
</code></dt>
<dd>
<div class="desc"><p>Clears the global list of all factories.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@staticmethod
def clear_factories():
    &#34;&#34;&#34;
    Clears the global list of all factories.
    &#34;&#34;&#34;
    TinyFactory.all_factories = {}</code></pre>
</details>
</dd>
<dt id="tinytroupe.factory.TinyFactory.set_simulation_for_free_factories"><code class="name flex">
<span>def <span class="ident">set_simulation_for_free_factories</span></span>(<span>simulation)</span>
</code></dt>
<dd>
<div class="desc"><p>Sets the simulation if it is None. This allows free factories to be captured by specific simulation scopes
if desired.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@staticmethod
def set_simulation_for_free_factories(simulation):
    &#34;&#34;&#34;
    Sets the simulation if it is None. This allows free factories to be captured by specific simulation scopes
    if desired.
    &#34;&#34;&#34;
    for factory in TinyFactory.all_factories.values():
        if factory.simulation_id is None:
            simulation.add_factory(factory)</code></pre>
</details>
</dd>
</dl>
<h3>Methods</h3>
<dl>
<dt id="tinytroupe.factory.TinyFactory.decode_complete_state"><code class="name flex">
<span>def <span class="ident">decode_complete_state</span></span>(<span>self, state: dict)</span>
</code></dt>
<dd>
<div class="desc"><p>Decodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def decode_complete_state(self, state:dict):
    &#34;&#34;&#34;
    Decodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.
    &#34;&#34;&#34;
    state = copy.deepcopy(state)

    self.__dict__.update(state)
    return self</code></pre>
</details>
</dd>
<dt id="tinytroupe.factory.TinyFactory.encode_complete_state"><code class="name flex">
<span>def <span class="ident">encode_complete_state</span></span>(<span>self) ‑> dict</span>
</code></dt>
<dd>
<div class="desc"><p>Encodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def encode_complete_state(self) -&gt; dict:
    &#34;&#34;&#34;
    Encodes the complete state of the factory. If subclasses have elements that are not serializable, they should override this method.
    &#34;&#34;&#34;

    state = copy.deepcopy(self.__dict__)
    return state</code></pre>
</details>
</dd>
</dl>
</dd>
<dt id="tinytroupe.factory.TinyPersonFactory"><code class="flex name class">
<span>class <span class="ident">TinyPersonFactory</span></span>
<span>(</span><span>context_text, simulation_id: str = None)</span>
</code></dt>
<dd>
<div class="desc"><p>A base class for various types of factories. This is important because it makes it easier to extend the system, particularly
regarding transaction caching.</p>
<p>Initialize a TinyPersonFactory instance.</p>
<h2 id="args">Args</h2>
<dl>
<dt><strong><code>context_text</code></strong> :&ensp;<code>str</code></dt>
<dd>The context text used to generate the TinyPerson instances.</dd>
<dt><strong><code>simulation_id</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>The ID of the simulation. Defaults to None.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class TinyPersonFactory(TinyFactory):

    def __init__(self, context_text, simulation_id:str=None):
        &#34;&#34;&#34;
        Initialize a TinyPersonFactory instance.

        Args:
            context_text (str): The context text used to generate the TinyPerson instances.
            simulation_id (str, optional): The ID of the simulation. Defaults to None.
        &#34;&#34;&#34;
        super().__init__(simulation_id)
        self.person_prompt_template_path = os.path.join(os.path.dirname(__file__), &#39;prompts/generate_person.mustache&#39;)
        self.context_text = context_text
        self.generated_minibios = [] # keep track of the generated persons. We keep the minibio to avoid generating the same person twice.
        self.generated_names = []

    @staticmethod
    def generate_person_factories(number_of_factories, generic_context_text):
        &#34;&#34;&#34;
        Generate a list of TinyPersonFactory instances using OpenAI&#39;s LLM.

        Args:
            number_of_factories (int): The number of TinyPersonFactory instances to generate.
            generic_context_text (str): The generic context text used to generate the TinyPersonFactory instances.

        Returns:
            list: A list of TinyPersonFactory instances.
        &#34;&#34;&#34;
        
        logger.info(f&#34;Starting the generation of the {number_of_factories} person factories based on that context: {generic_context_text}&#34;)
        
        system_prompt = open(os.path.join(os.path.dirname(__file__), &#39;prompts/generate_person_factory.md&#39;)).read()

        messages = []
        messages.append({&#34;role&#34;: &#34;system&#34;, &#34;content&#34;: system_prompt})

        user_prompt = chevron.render(&#34;Please, create {{number_of_factories}} person descriptions based on the following broad context: {{context}}&#34;, {
            &#34;number_of_factories&#34;: number_of_factories,
            &#34;context&#34;: generic_context_text
        })

        messages.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_prompt})

        response = openai_utils.client().send_message(messages)

        if response is not None:
            result = utils.extract_json(response[&#34;content&#34;])

            factories = []
            for i in range(number_of_factories):
                logger.debug(f&#34;Generating person factory with description: {result[i]}&#34;)
                factories.append(TinyPersonFactory(result[i]))

            return factories

        return None

    def generate_person(self, agent_particularities:str=None, temperature:float=1.5, attepmpts:int=5):
        &#34;&#34;&#34;
        Generate a TinyPerson instance using OpenAI&#39;s LLM.

        Args:
            agent_particularities (str): The particularities of the agent.
            temperature (float): The temperature to use when sampling from the LLM.

        Returns:
            TinyPerson: A TinyPerson instance generated using the LLM.
        &#34;&#34;&#34;

        logger.info(f&#34;Starting the person generation based on that context: {self.context_text}&#34;)

        prompt = chevron.render(open(self.person_prompt_template_path).read(), {
            &#34;context&#34;: self.context_text,
            &#34;agent_particularities&#34;: agent_particularities,
            &#34;already_generated&#34;: [minibio for minibio in self.generated_minibios]
        })

        def aux_generate():

            messages = []
            messages += [{&#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;You are a system that generates specifications of artificial entities.&#34;},
                        {&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: prompt}]

            # due to a technicality, we need to call an auxiliary method to be able to use the transactional decorator.
            message = self._aux_model_call(messages=messages, temperature=temperature)

            if message is not None:
                result = utils.extract_json(message[&#34;content&#34;])

                logger.debug(f&#34;Generated person parameters:\n{json.dumps(result, indent=4, sort_keys=True)}&#34;)

                # only accept the generated spec if the name is not already in the generated names, because they must be unique.
                if result[&#34;name&#34;].lower() not in self.generated_names:
                    return result

            return None # no suitable agent was generated
        
        agent_spec = None
        attempt = 0
        while agent_spec is None and attempt &lt; attepmpts:
            try:
                attempt += 1
                agent_spec = aux_generate()
            except Exception as e:
                logger.error(f&#34;Error while generating agent specification: {e}&#34;)
        
        # create the fresh agent
        if agent_spec is not None:
            # the agent is created here. This is why the present method cannot be cached. Instead, an auxiliary method is used
            # for the actual model call, so that it gets cached properly without skipping the agent creation.
            person = TinyPerson(agent_spec[&#34;name&#34;])
            self._setup_agent(person, agent_spec[&#34;_configuration&#34;])
            self.generated_minibios.append(person.minibio())
            self.generated_names.append(person.get(&#34;name&#34;).lower())
            return person
        else:
            logger.error(f&#34;Could not generate an agent after {attepmpts} attempts.&#34;)
            return None
        
    
    @transactional
    def _aux_model_call(self, messages, temperature):
        &#34;&#34;&#34;
        Auxiliary method to make a model call. This is needed in order to be able to use the transactional decorator,
    due to a technicality - otherwise, the agent creation would be skipped during cache reutilization, and
        we don&#39;t want that.
        &#34;&#34;&#34;
        return openai_utils.client().send_message(messages, temperature=temperature)
    
    @transactional
    def _setup_agent(self, agent, configuration):
        &#34;&#34;&#34;
        Sets up the agent with the necessary elements.
        &#34;&#34;&#34;
        for key, value in configuration.items():
            if isinstance(value, list):
                agent.define_several(key, value)
            else:
                agent.define(key, value)
        
        # does not return anything, as we don&#39;t want to cache the agent object itself.</code></pre>
</details>
<h3>Ancestors</h3>
<ul class="hlist">
<li><a title="tinytroupe.factory.TinyFactory" href="#tinytroupe.factory.TinyFactory">TinyFactory</a></li>
</ul>
<h3>Static methods</h3>
<dl>
<dt id="tinytroupe.factory.TinyPersonFactory.generate_person_factories"><code class="name flex">
<span>def <span class="ident">generate_person_factories</span></span>(<span>number_of_factories, generic_context_text)</span>
</code></dt>
<dd>
<div class="desc"><p>Generate a list of TinyPersonFactory instances using OpenAI's LLM.</p>
<h2 id="args">Args</h2>
<dl>
<dt><strong><code>number_of_factories</code></strong> :&ensp;<code>int</code></dt>
<dd>The number of TinyPersonFactory instances to generate.</dd>
<dt><strong><code>generic_context_text</code></strong> :&ensp;<code>str</code></dt>
<dd>The generic context text used to generate the TinyPersonFactory instances.</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>list</code></dt>
<dd>A list of TinyPersonFactory instances.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@staticmethod
def generate_person_factories(number_of_factories, generic_context_text):
    &#34;&#34;&#34;
    Generate a list of TinyPersonFactory instances using OpenAI&#39;s LLM.

    Args:
        number_of_factories (int): The number of TinyPersonFactory instances to generate.
        generic_context_text (str): The generic context text used to generate the TinyPersonFactory instances.

    Returns:
        list: A list of TinyPersonFactory instances.
    &#34;&#34;&#34;
    
    logger.info(f&#34;Starting the generation of the {number_of_factories} person factories based on that context: {generic_context_text}&#34;)
    
    system_prompt = open(os.path.join(os.path.dirname(__file__), &#39;prompts/generate_person_factory.md&#39;)).read()

    messages = []
    messages.append({&#34;role&#34;: &#34;system&#34;, &#34;content&#34;: system_prompt})

    user_prompt = chevron.render(&#34;Please, create {{number_of_factories}} person descriptions based on the following broad context: {{context}}&#34;, {
        &#34;number_of_factories&#34;: number_of_factories,
        &#34;context&#34;: generic_context_text
    })

    messages.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_prompt})

    response = openai_utils.client().send_message(messages)

    if response is not None:
        result = utils.extract_json(response[&#34;content&#34;])

        factories = []
        for i in range(number_of_factories):
            logger.debug(f&#34;Generating person factory with description: {result[i]}&#34;)
            factories.append(TinyPersonFactory(result[i]))

        return factories

    return None</code></pre>
</details>
</dd>
</dl>
<h3>Methods</h3>
<dl>
<dt id="tinytroupe.factory.TinyPersonFactory.generate_person"><code class="name flex">
<span>def <span class="ident">generate_person</span></span>(<span>self, agent_particularities: str = None, temperature: float = 1.5, attepmpts: int = 5)</span>
</code></dt>
<dd>
<div class="desc"><p>Generate a TinyPerson instance using OpenAI's LLM.</p>
<h2 id="args">Args</h2>
<dl>
<dt><strong><code>agent_particularities</code></strong> :&ensp;<code>str</code></dt>
<dd>The particularities of the agent.</dd>
<dt><strong><code>temperature</code></strong> :&ensp;<code>float</code></dt>
<dd>The temperature to use when sampling from the LLM.</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>TinyPerson</code></dt>
<dd>A TinyPerson instance generated using the LLM.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def generate_person(self, agent_particularities:str=None, temperature:float=1.5, attepmpts:int=5):
    &#34;&#34;&#34;
    Generate a TinyPerson instance using OpenAI&#39;s LLM.

    Args:
        agent_particularities (str): The particularities of the agent.
        temperature (float): The temperature to use when sampling from the LLM.

    Returns:
        TinyPerson: A TinyPerson instance generated using the LLM.
    &#34;&#34;&#34;

    logger.info(f&#34;Starting the person generation based on that context: {self.context_text}&#34;)

    prompt = chevron.render(open(self.person_prompt_template_path).read(), {
        &#34;context&#34;: self.context_text,
        &#34;agent_particularities&#34;: agent_particularities,
        &#34;already_generated&#34;: [minibio for minibio in self.generated_minibios]
    })

    def aux_generate():

        messages = []
        messages += [{&#34;role&#34;: &#34;system&#34;, &#34;content&#34;: &#34;You are a system that generates specifications of artificial entities.&#34;},
                    {&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: prompt}]

        # due to a technicality, we need to call an auxiliary method to be able to use the transactional decorator.
        message = self._aux_model_call(messages=messages, temperature=temperature)

        if message is not None:
            result = utils.extract_json(message[&#34;content&#34;])

            logger.debug(f&#34;Generated person parameters:\n{json.dumps(result, indent=4, sort_keys=True)}&#34;)

            # only accept the generated spec if the name is not already in the generated names, because they must be unique.
            if result[&#34;name&#34;].lower() not in self.generated_names:
                return result

        return None # no suitable agent was generated
    
    agent_spec = None
    attempt = 0
    while agent_spec is None and attempt &lt; attepmpts:
        try:
            attempt += 1
            agent_spec = aux_generate()
        except Exception as e:
            logger.error(f&#34;Error while generating agent specification: {e}&#34;)
    
    # create the fresh agent
    if agent_spec is not None:
        # the agent is created here. This is why the present method cannot be cached. Instead, an auxiliary method is used
        # for the actual model call, so that it gets cached properly without skipping the agent creation.
        person = TinyPerson(agent_spec[&#34;name&#34;])
        self._setup_agent(person, agent_spec[&#34;_configuration&#34;])
        self.generated_minibios.append(person.minibio())
        self.generated_names.append(person.get(&#34;name&#34;).lower())
        return person
    else:
        logger.error(f&#34;Could not generate an agent after {attepmpts} attempts.&#34;)
        return None</code></pre>
</details>
</dd>
</dl>
<h3>Inherited members</h3>
<ul class="hlist">
<li><code><b><a title="tinytroupe.factory.TinyFactory" href="#tinytroupe.factory.TinyFactory">TinyFactory</a></b></code>:
<ul class="hlist">
<li><code><a title="tinytroupe.factory.TinyFactory.add_factory" href="#tinytroupe.factory.TinyFactory.add_factory">add_factory</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.clear_factories" href="#tinytroupe.factory.TinyFactory.clear_factories">clear_factories</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.decode_complete_state" href="#tinytroupe.factory.TinyFactory.decode_complete_state">decode_complete_state</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.encode_complete_state" href="#tinytroupe.factory.TinyFactory.encode_complete_state">encode_complete_state</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.set_simulation_for_free_factories" href="#tinytroupe.factory.TinyFactory.set_simulation_for_free_factories">set_simulation_for_free_factories</a></code></li>
</ul>
</li>
</ul>
</dd>
</dl>
</section>
</article>
<nav id="sidebar">
<h1>Index</h1>
<div class="toc">
<ul></ul>
</div>
<ul id="index">
<li><h3>Super-module</h3>
<ul>
<li><code><a title="tinytroupe" href="index.html">tinytroupe</a></code></li>
</ul>
</li>
<li><h3><a href="#header-classes">Classes</a></h3>
<ul>
<li>
<h4><code><a title="tinytroupe.factory.TinyFactory" href="#tinytroupe.factory.TinyFactory">TinyFactory</a></code></h4>
<ul class="">
<li><code><a title="tinytroupe.factory.TinyFactory.add_factory" href="#tinytroupe.factory.TinyFactory.add_factory">add_factory</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.all_factories" href="#tinytroupe.factory.TinyFactory.all_factories">all_factories</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.clear_factories" href="#tinytroupe.factory.TinyFactory.clear_factories">clear_factories</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.decode_complete_state" href="#tinytroupe.factory.TinyFactory.decode_complete_state">decode_complete_state</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.encode_complete_state" href="#tinytroupe.factory.TinyFactory.encode_complete_state">encode_complete_state</a></code></li>
<li><code><a title="tinytroupe.factory.TinyFactory.set_simulation_for_free_factories" href="#tinytroupe.factory.TinyFactory.set_simulation_for_free_factories">set_simulation_for_free_factories</a></code></li>
</ul>
</li>
<li>
<h4><code><a title="tinytroupe.factory.TinyPersonFactory" href="#tinytroupe.factory.TinyPersonFactory">TinyPersonFactory</a></code></h4>
<ul class="">
<li><code><a title="tinytroupe.factory.TinyPersonFactory.generate_person" href="#tinytroupe.factory.TinyPersonFactory.generate_person">generate_person</a></code></li>
<li><code><a title="tinytroupe.factory.TinyPersonFactory.generate_person_factories" href="#tinytroupe.factory.TinyPersonFactory.generate_person_factories">generate_person_factories</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
</footer>
</body>
</html>