{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "30b67e4e-5b47-48ce-82e3-2e6757e76f8b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import lru_cache, cached_property\n",
    "\n",
    "class PromptCache:\n",
    "    def __init__(self, cache_size: int = 200):\n",
    "        self.cache_size = cache_size\n",
    "        self._cache     = lru_cache(maxsize=cache_size)(self._raw_cache)()\n",
    "\n",
    "    @cached_property\n",
    "    def cache(self):\n",
    "        return self._cache\n",
    "\n",
    "    def get(self, key):\n",
    "        return self.cache.get(key)\n",
    "\n",
    "    def add(self, key, value):\n",
    "        if key not in self.cache:\n",
    "            self.cache[key] = value\n",
    "\n",
    "    def clear(self):\n",
    "        self.cache.clear()\n",
    "\n",
    "    @staticmethod\n",
    "    def _raw_cache():\n",
    "        return {}\n",
    "\n",
    "    \n",
    "    \n",
    "    \n",
    "import os\n",
    "from glob import glob\n",
    "from typing import List\n",
    "from jinja2 import Template, Environment, FileSystemLoader, meta\n",
    "\n",
    "\n",
    "class TemplateLoader:\n",
    "    \"\"\"\n",
    "    A class for loading and managing Jinja2 templates. It allows loading templates from files or strings,\n",
    "    listing available templates, and getting template variables.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        \"\"\"\n",
    "        Initialize the TemplateLoader object and create an empty dictionary for loaded templates.\n",
    "        \"\"\"\n",
    "        self.loaded_templates = {}\n",
    "\n",
    "    def load_template(\n",
    "        self, template: str, from_string: bool = False\n",
    "    ):\n",
    "        \"\"\"\n",
    "        Load a Jinja2 template either from a string or a file.\n",
    "\n",
    "        Args:\n",
    "            template (str): Template string or path to the template file.\n",
    "            from_string (bool): Whether to load the template from a string. Defaults to False.\n",
    "\n",
    "        Returns:\n",
    "            dict: Loaded template data.\n",
    "        \"\"\"\n",
    "        if template in self.loaded_templates:\n",
    "            return self.loaded_templates[template]\n",
    "\n",
    "        if from_string:\n",
    "            template_instance = Template(template)\n",
    "            template_data = {\n",
    "                \"template_name\": \"from_string\",\n",
    "                \"template_dir\": None,\n",
    "                \"environment\": None,\n",
    "                \"template\": template_instance,\n",
    "            }\n",
    "        else:\n",
    "            template_data = self._load_template_from_path(template)\n",
    "\n",
    "        self.loaded_templates[template] = template_data\n",
    "        return self.loaded_templates[template]\n",
    "\n",
    "    def _load_template_from_path(self, template: str) -> dict:\n",
    "        \"\"\"\n",
    "        Load a Jinja2 template from the given path.\n",
    "\n",
    "        Args:\n",
    "            template (str): Path to the template file.\n",
    "\n",
    "        Returns:\n",
    "            dict: Loaded template data.\n",
    "        \"\"\"\n",
    "        current_dir = os.path.dirname(os.path.realpath('.'))\n",
    "        current_dir, _ = os.path.split(current_dir)\n",
    "        templates_dir = os.path.join(current_dir, \".\")\n",
    "        all_folders = {\n",
    "            f\"{folder}.jinja\": folder for folder in os.listdir(templates_dir)\n",
    "        }\n",
    "\n",
    "        if template in all_folders:\n",
    "            \n",
    "            template_name_, _ = template_name.split(\".jinja\")\n",
    "            template_dir = os.path.join(templates_dir, template_name_)\n",
    "            \n",
    "            environment = Environment(loader=FileSystemLoader(template_dir))\n",
    "            template_instance = environment.get_template(template_name)\n",
    "\n",
    "        else:\n",
    "            self._verify_template_path(template)\n",
    "            custom_template_dir, custom_template_name = os.path.split(template)\n",
    "\n",
    "            template_name = custom_template_name\n",
    "            template_dir = custom_template_dir\n",
    "            environment = Environment(loader=FileSystemLoader(template_dir))\n",
    "            template_instance = environment.get_template(custom_template_name)\n",
    "\n",
    "        return {\n",
    "            \"template_name\": template_name,\n",
    "            \"template_dir\": template_dir,\n",
    "            \"environment\": environment,\n",
    "            \"template\": template_instance,\n",
    "        }\n",
    "\n",
    "    def _get_metadata(self, template_name, template_path):\n",
    "        \n",
    "        template_name, _ = template_name.split(\".jinja\")\n",
    "        metadata_files = glob(\n",
    "            os.path.join(template_path, template_name, \"metadata.json\")\n",
    "        )\n",
    "\n",
    "        metadata = read_json(metadata_files[0])[0]\n",
    "        metadata[\"file_path\"] = os.path.join(template_path, template_name)\n",
    "        return {\"metadata\": metadata}\n",
    "\n",
    "\n",
    "    def _verify_template_path(self, templates_path: str):\n",
    "        \"\"\"\n",
    "        Verify the existence of the template file.\n",
    "\n",
    "        Args:\n",
    "            templates_path (str): Path to the template file.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: If the template file does not exist.\n",
    "        \"\"\"\n",
    "        if not os.path.isfile(templates_path):\n",
    "            raise ValueError(f\"Templates path {templates_path} does not exist\")\n",
    "\n",
    "    def list_templates(self, environment) -> List[str]:\n",
    "        \"\"\"\n",
    "        List all templates in the specified environment.\n",
    "\n",
    "            Args:\n",
    "                environment (Environment): The Jinja2 environment to search for templates.\n",
    "\n",
    "        Returns:\n",
    "            List[str]: List of available template names.\n",
    "        \"\"\"\n",
    "        return environment.list_templates()\n",
    "\n",
    "    def get_template_variables(self, environment, template_name) -> List[str]:\n",
    "        \"\"\"\n",
    "        Get a list of undeclared variables for the specified template.\n",
    "\n",
    "        Args:\n",
    "            environment (Environment): The Jinja2 environment of the template.\n",
    "            template_name (str): The name of the template.\n",
    "\n",
    "        Returns:\n",
    "            List[str]: List of undeclared variables in the template.\n",
    "        \"\"\"\n",
    "        template_source = environment.loader.get_source(environment, template_name)\n",
    "        parsed_content = environment.parse(template_source)\n",
    "        return list(meta.find_undeclared_variables(parsed_content))\n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "import json\n",
    "import uuid\n",
    "import datetime\n",
    "from pathlib import Path\n",
    "import hashlib\n",
    "import os\n",
    "import io\n",
    "\n",
    "\n",
    "def read_json(json_file):\n",
    "    \"\"\"\n",
    "    Reads JSON data from a file and returns a Python object.\n",
    "\n",
    "    Args:\n",
    "        json_file (str): The path to the JSON file to read.\n",
    "\n",
    "    Returns:\n",
    "        A Python object representing the JSON data.\n",
    "    \"\"\"\n",
    "    with open(json_file) as f:\n",
    "        try:\n",
    "            data = json.load(f)\n",
    "        except json.JSONDecodeError as e:\n",
    "            raise ValueError(\n",
    "                f\"Error decoding JSON data from file {json_file}: {str(e)}\"\n",
    "            )\n",
    "    return data\n",
    "\n",
    "\n",
    "def write_json(path, data, file_name):\n",
    "    \"\"\"\n",
    "    Writes JSON data to a file.\n",
    "\n",
    "    Args:\n",
    "        path (str): The path to the directory where the file should be saved.\n",
    "        data (Any): The data to write to the file. This can be any JSON-serializable object.\n",
    "        file_name (str): The name of the file to write, without the '.json' extension.\n",
    "\n",
    "    Raises:\n",
    "        IOError: If there is a problem writing the file.\n",
    "    \"\"\"\n",
    "    full_path = os.path.join(path, f\"{file_name}.json\")\n",
    "    try:\n",
    "        with open(full_path, \"w\", encoding=\"utf-8\") as f:\n",
    "            json.dump(data, f, ensure_ascii=False, indent=4)\n",
    "    except IOError as e:\n",
    "        raise IOError(f\"Error writing JSON file '{full_path}': {e.strerror}\")\n",
    "\n",
    "\n",
    "def calculate_hash(text: str, encoding: str = \"utf-8\") -> str:\n",
    "    \"\"\"\n",
    "    Calculate the hash of a text using the specified encoding.\n",
    "\n",
    "    Args:\n",
    "        text: The text to calculate the hash for.\n",
    "        encoding: The encoding to use for the text. Defaults to \"utf-8\".\n",
    "\n",
    "    Returns:\n",
    "        The hash of the text.\n",
    "    \"\"\"\n",
    "    if not isinstance(text, str):\n",
    "        raise TypeError(\"Expected a string for 'text' parameter.\")\n",
    "\n",
    "    hash_obj = hashlib.md5()\n",
    "    hash_obj.update(text.encode(encoding))\n",
    "    return hash_obj.hexdigest()\n",
    "\n",
    "\n",
    "def setup_folder(folder_path: str, folder_name: str = None) -> str:\n",
    "    \"\"\"\n",
    "    Creates a folder in the specified folder_path.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    folder_path : str\n",
    "        The path to the directory where the folder will be created.\n",
    "    folder_name : str, optional\n",
    "        The name of the folder. If None, a name will be generated using the\n",
    "        current date and time and a UUID.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    The path to the created folder.\n",
    "    \"\"\"\n",
    "\n",
    "    if folder_name is None:\n",
    "        current_date = datetime.datetime.now().strftime(\"%Y_%m_%d:%H:%M:%S\")\n",
    "        conversation_id = uuid.uuid4()\n",
    "        folder_name = f\"{current_date}_{conversation_id}\"\n",
    "        folder_name = calculate_hash(folder_name)\n",
    "\n",
    "    folder_path = Path(folder_path)\n",
    "    folder_path.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    folder = folder_path / folder_name\n",
    "    folder.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    return str(folder), folder_name\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "import os\n",
    "import uuid\n",
    "from glob import glob\n",
    "import datetime\n",
    "from pathlib import Path\n",
    "\n",
    "\n",
    "# from promptify.utils.data_utils import *\n",
    "# from promptify.prompter.template_loader import TemplateLoader\n",
    "\n",
    "\n",
    "from typing import List, Dict, Any, Optional\n",
    "from jinja2 import Environment, FileSystemLoader, meta, Template\n",
    "\n",
    "class Prompter:\n",
    "    \n",
    "    \"\"\"\n",
    "    A class to generate and handle utils related to prompts\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        allowed_missing_variables: Optional[List[str]] = None,\n",
    "        default_variable_values: Optional[Dict[str, Any]] = None\n",
    "    ) -> None:\n",
    "        \"\"\"\n",
    "        Initialize Prompter with default or user-specified settings.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        allowed_missing_variables : list of str, optional\n",
    "            A list of variable names that are allowed to be missing from the template. Default is ['examples', 'description', 'output_format'].\n",
    "        default_variable_values : dict of str: any, optional\n",
    "            A dictionary mapping variable names to default values to be used in the template.\n",
    "            If a variable is not found in the input dictionary or in the default values, it will be assumed to be required and an error will be raised. Default is an empty dictionary.\n",
    "        \"\"\"\n",
    "\n",
    "\n",
    "        self.default_variable_values = default_variable_values or {}\n",
    "        self.template_loader = TemplateLoader()\n",
    "\n",
    "        self.allowed_missing_variables = [\n",
    "            \"examples\",\n",
    "            \"description\",\n",
    "            \"output_format\",\n",
    "        ]\n",
    "        self.allowed_missing_variables.extend(allowed_missing_variables or [])\n",
    "\n",
    "    def update_default_variable_values(self, new_defaults: Dict[str, Any]) -> None:\n",
    "        self.default_variable_values.update(new_defaults)\n",
    "\n",
    "    def generate(self, template, text_input, **kwargs) -> str:\n",
    "        \"\"\"\n",
    "        Generates a prompt based on a template and input variables.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        text_input : str\n",
    "            The input text to use in the prompt.\n",
    "        **kwargs : dict\n",
    "            Additional variables to be used in the template.\n",
    "\n",
    "        Returns\n",
    "        -------\n",
    "        str\n",
    "            The generated prompt string.\n",
    "        \"\"\"\n",
    "\n",
    "        loader = self.template_loader.load_template(\n",
    "            template, kwargs.get(\"from_string\", False)\n",
    "        )\n",
    "\n",
    "        kwargs[\"text_input\"] = text_input\n",
    "        \n",
    "        print(loader)\n",
    "\n",
    "        if loader[\"environment\"]:\n",
    "            variables = self.template_loader.get_template_variables(\n",
    "                loader[\"environment\"], loader[\"template_name\"]\n",
    "            )\n",
    "            variables_dict = {\n",
    "                temp_variable_: kwargs.get(temp_variable_, None)\n",
    "                for temp_variable_ in variables\n",
    "            }\n",
    "\n",
    "            variables_missing = [\n",
    "                variable\n",
    "                for variable in variables\n",
    "                if variable not in kwargs\n",
    "                and variable not in self.allowed_missing_variables\n",
    "                and variable not in self.default_variable_values\n",
    "            ]\n",
    "            \n",
    "            \n",
    "            # print(variables_missing)\n",
    "            if variables_missing:\n",
    "                raise ValueError(\n",
    "                    f\"Missing required variables in template {', '.join(variables_missing)}\"\n",
    "                )\n",
    "        else:\n",
    "            variables_dict = {\"data\": None}\n",
    "\n",
    "        kwargs.update(self.default_variable_values)\n",
    "        prompt = loader[\"template\"].render(**kwargs).strip()\n",
    "        \n",
    "        if kwargs.get(\"verbose\", False):\n",
    "            print(prompt)\n",
    "            \n",
    "        return prompt, variables_dict\n",
    "\n",
    "\n",
    "import json\n",
    "\n",
    "def is_string_or_digit(obj):\n",
    "    \"\"\"\n",
    "    Check if an object is a string or a digit (integer or float).\n",
    "\n",
    "    Args:\n",
    "        obj (any): The object to be checked.\n",
    "\n",
    "    Returns:\n",
    "        bool: True if the object is a string or a digit, False otherwise.\n",
    "\n",
    "    Examples:\n",
    "        >>> is_string_or_digit(\"hello\")\n",
    "        True\n",
    "        >>> is_string_or_digit(123)\n",
    "        True\n",
    "        >>> is_string_or_digit(3.14)\n",
    "        True\n",
    "        >>> is_string_or_digit(True)\n",
    "        False\n",
    "    \"\"\"\n",
    "    return isinstance(obj, (str, int, float))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "81ad0673-e77c-4a4a-b01d-a9cc1605b600",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'template_name': 'abcd.jinja', 'template_dir': '/Users/ankit.pal/Desktop/Promptfinal/Promptify/promptify/prompter', 'environment': <jinja2.environment.Environment object at 0x10646ecd0>, 'template': <Template 'abcd.jinja'>}\n",
      "['domain']\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Missing required variables in template domain",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[7], line 23\u001b[0m\n\u001b[1;32m     20\u001b[0m raw_prompt \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquick brown fox jump over\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m     22\u001b[0m sp \u001b[38;5;241m=\u001b[39m Prompter()\n\u001b[0;32m---> 23\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43msp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m/Users/ankit.pal/Desktop/Promptfinal/Promptify/promptify/prompter/abcd.jinja\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mraw_prompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlabels\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43maa\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m])\n",
      "Cell \u001b[0;32mIn[3], line 372\u001b[0m, in \u001b[0;36mPrompter.generate\u001b[0;34m(self, template, text_input, **kwargs)\u001b[0m\n\u001b[1;32m    370\u001b[0m     \u001b[38;5;28mprint\u001b[39m(variables_missing)\n\u001b[1;32m    371\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m variables_missing:\n\u001b[0;32m--> 372\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m    373\u001b[0m             \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMissing required variables in template \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(variables_missing)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    374\u001b[0m         )\n\u001b[1;32m    375\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    376\u001b[0m     variables_dict \u001b[38;5;241m=\u001b[39m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28;01mNone\u001b[39;00m}\n",
      "\u001b[0;31mValueError\u001b[0m: Missing required variables in template domain"
     ]
    }
   ],
   "source": [
    "# prompt = \"\"\"{%- if description is not none -%}\n",
    "# {{ description }}\n",
    "# {% endif -%}\n",
    "\n",
    "# You are a highly intelligent and accurate Binary Classification system. You take Passage as input and classify that as either {{ label_0 }} or {{ label_1 }} Category. Your output format is only {{ output_format|default(\"[{'C':Category}]\") }} form, no other form.\n",
    "\n",
    "# {% if examples is defined and examples|length > 0 -%}\n",
    "# Examples:\n",
    "# {% for sentence, label in examples %}\n",
    "# Input: {{ sentence }}\n",
    "# Output: [{'C': '{{ label }}' }]\n",
    "# {% endfor %}\n",
    "# {% endif -%}\n",
    "\n",
    "# Input: {{ text_input }}\n",
    "# Output:\"\"\"\n",
    "\n",
    "# prompt = \"\"\"I ate Something I don't know what it is... Why do I keep Telling things about food and elon musk was acusing Google and PayPal\"\"\"\n",
    "\n",
    "raw_prompt = \"quick brown fox jump over\"\n",
    "\n",
    "sp = Prompter()\n",
    "print(sp.generate('/Users/ankit.pal/Desktop/Promptfinal/Promptify/promptify/prompter/abcd.jinja', raw_prompt, labels = 'aa')[0])\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "2c3e8ede-0327-477f-9a2e-a51237697f49",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'template_name': 'from_string', 'template_dir': None, 'environment': None, 'template': <Template memory:1481f0fa0>}\n",
      "/Users/ankit.pal/Desktop/Promptfinal/Promptify/promptify/prompter/ner.jinja\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "dd65652a-6f0b-47fe-a0ca-649f3451bfde",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/ankit.pal/Desktop/Promptfinal/Promptify/promptify/prompter\n"
     ]
    }
   ],
   "source": [
    "!pwd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "4ca4a9bf-02b8-4f3a-a91b-ad96ea045b53",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"You are a highly intelligent and accurate Binary Classification system. You take Passage as input and classify that as either  or  Category. Your output format is only [{'C':Category}] form, no other form.\\n\\nInput: \\nOutput:\""
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from jinja2 import Template, Environment, FileSystemLoader, meta\n",
    "\n",
    "\n",
    "prompt = \"\"\"{%- if description is not none -%}\n",
    "{{ description }}\n",
    "{% endif -%}\n",
    "\n",
    "You are a highly intelligent and accurate Binary Classification system. You take Passage as input and classify that as either {{ label_0 }} or {{ label_1 }} Category. Your output format is only {{ output_format|default(\"[{'C':Category}]\") }} form, no other form.\n",
    "\n",
    "{% if examples is defined and examples|length > 0 -%}\n",
    "Examples:\n",
    "{% for sentence, label in examples %}\n",
    "Input: {{ sentence }}\n",
    "Output: [{'C': '{{ label }}' }]\n",
    "{% endfor %}\n",
    "{% endif -%}\n",
    "\n",
    "Input: {{ text_input }}\n",
    "Output:\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# kwargs.update(self.default_variable_values)\n",
    "promptas = Template(prompt).render().strip()\n",
    "promptas"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "91bdd7df-39ee-40d3-bf07-9df639900edc",
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'str' object has no attribute 'environment'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[58], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mpromptas\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43menvironment\u001b[49m\n",
      "\u001b[0;31mAttributeError\u001b[0m: 'str' object has no attribute 'environment'"
     ]
    }
   ],
   "source": [
    "promptas.environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e3c4ba2b-3a57-4c39-afa0-423a5e194b02",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import uuid\n",
    "import datetime\n",
    "from typing import Dict, Any\n",
    "\n",
    "\n",
    "def get_conversation_schema(\n",
    "    conversation_id: str, llm_name: str, **llm_metadata: Any\n",
    ") -> Dict[str, Any]:\n",
    "    \"\"\"\n",
    "    Constructs a conversation schema with the specified parameters.\n",
    "\n",
    "    Args:\n",
    "    - conversation_id: A string representing the unique identifier of the conversation.\n",
    "    - llm_name: A string representing the name of the language model.\n",
    "    - **llm_metadata: Optional additional metadata to associate with the language model.\n",
    "\n",
    "    Returns:\n",
    "    A dictionary representing the conversation schema.\n",
    "    \"\"\"\n",
    "    # Remove any api_key from the kwargs to avoid potential security issues\n",
    "    llm_metadata.pop(\"api_key\", None)\n",
    "\n",
    "    # Construct the conversation schema dictionary\n",
    "    conversation_schema = {\n",
    "        \"conversation_id\": conversation_id,\n",
    "        \"start_time\": str(datetime.datetime.now().strftime(\"%Y_%m_%d:%H:%M:%S\")),\n",
    "        \"llm\": {\"name\": llm_name, \"meta_data\": llm_metadata},\n",
    "        \"participants\": [\n",
    "            {\"name\": \"User\", \"is_bot\": False},\n",
    "            {\"name\": \"Assistant\", \"is_bot\": True},\n",
    "        ],\n",
    "        \"messages\": [],\n",
    "    }\n",
    "\n",
    "    return conversation_schema\n",
    "\n",
    "\n",
    "def create_message(\n",
    "    task: str,\n",
    "    prompt: str,\n",
    "    response: str,\n",
    "    structured_response: Any,\n",
    "    **template_metadata: Any\n",
    ") -> Dict[str, Any]:\n",
    "    \"\"\"\n",
    "    Creates a message dictionary with the specified parameters.\n",
    "\n",
    "    Args:\n",
    "    - task: A string representing the task the message is associated with.\n",
    "    - prompt: A string representing the prompt that initiated the message.\n",
    "    - response: A string representing the message response.\n",
    "    - structured_response: A structured representation of the message response.\n",
    "    - **template_metadata: Optional metadata to associate with the message.\n",
    "\n",
    "    Returns:\n",
    "    A dictionary representing the message.\n",
    "    \"\"\"\n",
    "    # Get the current timestamp as a formatted string\n",
    "    timestamp = str(datetime.datetime.now().strftime(\"%Y_%m_%d:%H:%M:%S\"))\n",
    "    prompt_id = str(uuid.uuid4())\n",
    "\n",
    "    # Construct the message dictionary\n",
    "    \n",
    "    message = {\n",
    "        \"timestamp\": timestamp,\n",
    "        \"prompt_id\":prompt_id,\n",
    "        \"task\": task,\n",
    "        \"template_meta_data\": template_metadata,\n",
    "        \"prompt\": prompt,\n",
    "        \"response\": response,\n",
    "        \"structured_response\": structured_response,\n",
    "    }\n",
    "\n",
    "    return message\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "class ConversationLogger:\n",
    "    \n",
    "    def __init__(self, conversation_path: str, llm_parameters: Dict):\n",
    "        \n",
    "        \"\"\"Create a logger for a conversation.\n",
    "\n",
    "        Args:\n",
    "            conversation_path: The path to the folder where conversations will be stored.\n",
    "            model_name: The name of the language model used in the conversation.\n",
    "            model_dict: A dictionary containing metadata about the model.\n",
    "        \"\"\"\n",
    "        \n",
    "        \n",
    "        self.conversation_id   = uuid.uuid4()\n",
    "        self.storage_name      = f'/llm_responses/llm_session_{self.conversation_id}/'\n",
    "        self.conversation_path = os.path.join(conversation_path, self.storage_name)\n",
    "        \n",
    "        folder_path = Path(self.conversation_path)\n",
    "        folder_path.mkdir(parents=True, exist_ok=True)\n",
    "        \n",
    "        model_dict = {key: value for key, value in llm_parameters.items() if is_string_or_digit(value)}\n",
    "        self.conversation = get_conversation_schema(self.conversation_id, llm_parameters['model'], **llm_parameters)\n",
    "        # write_json(self.conversation_path, self.conversation, \"history\")\n",
    "\n",
    "    def add_message(self, message: Dict[str, Any]):\n",
    "\n",
    "        \"\"\"Add a message to the conversation.\n",
    "        Args:\n",
    "            prompt: The prompt sent to the language model.\n",
    "            response: The response generated by the language model.\n",
    "            source: The source of the message (\"user\" or \"model\").\n",
    "            metadata: Optional metadata about the message.\n",
    "            **kwargs: Additional metadata to be added to the message.\n",
    "        \"\"\"\n",
    "        \n",
    "        message_id   = uuid.uuid4()\n",
    "        self.conversation[\"messages\"].append(message)\n",
    "        write_json(self.conversation_file, self.conversation, message_id)\n",
    "\n",
    "    def __repr__(self):\n",
    "        return f\"ConversationLogger(conversation_id={self.conversation_id}, conversation_path={self.conversation_path})\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ad558482-8c32-442b-ab0e-81d320e3e0c8",
   "metadata": {},
   "outputs": [
    {
     "ename": "OSError",
     "evalue": "[Errno 30] Read-only file system: '/llm_responses'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "File \u001b[0;32m~/miniforge3/lib/python3.9/pathlib.py:1323\u001b[0m, in \u001b[0;36mPath.mkdir\u001b[0;34m(self, mode, parents, exist_ok)\u001b[0m\n\u001b[1;32m   1322\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1323\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_accessor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmkdir\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1324\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mFileNotFoundError\u001b[39;00m:\n",
      "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/llm_responses/llm_session_fbe30bfa-486a-438a-8e4c-b3c89bf05199'",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[0;31mOSError\u001b[0m                                   Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[9], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m yu \u001b[38;5;241m=\u001b[39m \u001b[43mConversationLogger\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m.\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ma\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m:\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m\n",
      "Cell \u001b[0;32mIn[8], line 104\u001b[0m, in \u001b[0;36mConversationLogger.__init__\u001b[0;34m(self, conversation_path, llm_parameters)\u001b[0m\n\u001b[1;32m    101\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconversation_path \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(conversation_path, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstorage_name)\n\u001b[1;32m    103\u001b[0m folder_path \u001b[38;5;241m=\u001b[39m Path(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconversation_path)\n\u001b[0;32m--> 104\u001b[0m \u001b[43mfolder_path\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmkdir\u001b[49m\u001b[43m(\u001b[49m\u001b[43mparents\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mexist_ok\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m    106\u001b[0m model_dict \u001b[38;5;241m=\u001b[39m {key: value \u001b[38;5;28;01mfor\u001b[39;00m key, value \u001b[38;5;129;01min\u001b[39;00m llm_parameters\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m is_string_or_digit(value)}\n\u001b[1;32m    107\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconversation \u001b[38;5;241m=\u001b[39m get_conversation_schema(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconversation_id, llm_parameters[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m'\u001b[39m], \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mllm_parameters)\n",
      "File \u001b[0;32m~/miniforge3/lib/python3.9/pathlib.py:1327\u001b[0m, in \u001b[0;36mPath.mkdir\u001b[0;34m(self, mode, parents, exist_ok)\u001b[0m\n\u001b[1;32m   1325\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m parents \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparent \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mself\u001b[39m:\n\u001b[1;32m   1326\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m\n\u001b[0;32m-> 1327\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmkdir\u001b[49m\u001b[43m(\u001b[49m\u001b[43mparents\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mexist_ok\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m   1328\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmkdir(mode, parents\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m, exist_ok\u001b[38;5;241m=\u001b[39mexist_ok)\n\u001b[1;32m   1329\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mOSError\u001b[39;00m:\n\u001b[1;32m   1330\u001b[0m     \u001b[38;5;66;03m# Cannot rely on checking for EEXIST, since the operating system\u001b[39;00m\n\u001b[1;32m   1331\u001b[0m     \u001b[38;5;66;03m# could give priority to other errors like EACCES or EROFS\u001b[39;00m\n",
      "File \u001b[0;32m~/miniforge3/lib/python3.9/pathlib.py:1323\u001b[0m, in \u001b[0;36mPath.mkdir\u001b[0;34m(self, mode, parents, exist_ok)\u001b[0m\n\u001b[1;32m   1319\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m   1320\u001b[0m \u001b[38;5;124;03mCreate a new directory at this given path.\u001b[39;00m\n\u001b[1;32m   1321\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m   1322\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1323\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_accessor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmkdir\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1324\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mFileNotFoundError\u001b[39;00m:\n\u001b[1;32m   1325\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m parents \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparent \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mself\u001b[39m:\n",
      "\u001b[0;31mOSError\u001b[0m: [Errno 30] Read-only file system: '/llm_responses'"
     ]
    }
   ],
   "source": [
    "# NOTE(review): per the recorded output, this call raised OSError (Errno 30,\n",
    "# read-only file system '/llm_responses') from Path.mkdir inside\n",
    "# ConversationLogger.__init__ — the class appears to build its storage path\n",
    "# internally, so `yu` was never assigned. Re-run in a writable location,\n",
    "# or fix ConversationLogger to honor the conversation_path argument ('.').\n",
    "yu = ConversationLogger('.', {'a':1})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9d15ff4-0fa9-41ec-98a2-986507aad799",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
