{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2024-06-19 10:16:01 - _config - DEBUG - [_config.py:80:load_ssl_context] - load_ssl_context verify=True cert=None trust_env=True http2=False\n",
      "2024-06-19 10:16:01 - _config - DEBUG - [_config.py:146:load_ssl_context_verify] - load_verify_locations cafile='/Users/liyin/Documents/test/LightRAG/.venv/lib/python3.11/site-packages/certifi/cacert.pem'\n",
      "2024-06-19 10:16:01 - prompt_builder - INFO - [prompt_builder.py:79:__init__] - Prompt has variables: ['output_str', 'input_str', 'tools_str', 'steps_str', 'context_str', 'examples_str', 'output_format_str', 'task_desc_str', 'chat_history_str']\n",
      "2024-06-19 10:16:01 - generator - INFO - [generator.py:224:call] - prompt_kwargs: {'input_str': 'What is the capital of France?'}\n",
      "2024-06-19 10:16:01 - generator - INFO - [generator.py:225:call] - model_kwargs: {}\n",
      "2024-06-19 10:16:01 - openai_client - INFO - [openai_client.py:196:call] - api_kwargs: {'model': 'gpt-3.5-turbo', 'logprobs': True, 'n': 2, 'messages': [{'role': 'system', 'content': '<Inputs>\\nWhat is the capital of France?\\n</Inputs>\\nYou:'}]}\n",
      "2024-06-19 10:16:01 - _base_client - DEBUG - [_base_client.py:446:_build_request] - Request options: {'method': 'post', 'url': '/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'system', 'content': '<Inputs>\\nWhat is the capital of France?\\n</Inputs>\\nYou:'}], 'model': 'gpt-3.5-turbo', 'logprobs': True, 'n': 2}}\n",
      "2024-06-19 10:16:01 - _base_client - DEBUG - [_base_client.py:949:_request] - Sending HTTP Request: POST https://api.openai.com/v1/chat/completions\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - connect_tcp.complete return_value=<httpcore._backends.sync.SyncStream object at 0x12a12af10>\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - start_tls.started ssl_context=<ssl.SSLContext object at 0x12a0f0e60> server_hostname='api.openai.com' timeout=5.0\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - start_tls.complete return_value=<httpcore._backends.sync.SyncStream object at 0x12a13e3d0>\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - send_request_headers.started request=<Request [b'POST']>\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - send_request_headers.complete\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - send_request_body.started request=<Request [b'POST']>\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - send_request_body.complete\n",
      "2024-06-19 10:16:01 - _trace - DEBUG - [_trace.py:45:trace] - receive_response_headers.started request=<Request [b'POST']>\n",
      "2024-06-19 10:16:02 - _trace - DEBUG - [_trace.py:45:trace] - receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Wed, 19 Jun 2024 17:16:02 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'openai-organization', b'sylphai'), (b'openai-processing-ms', b'311'), (b'openai-version', b'2020-10-01'), (b'strict-transport-security', b'max-age=31536000; includeSubDomains'), (b'x-ratelimit-limit-requests', b'10000'), (b'x-ratelimit-limit-tokens', b'1000000'), (b'x-ratelimit-remaining-requests', b'9999'), (b'x-ratelimit-remaining-tokens', b'999952'), (b'x-ratelimit-reset-requests', b'6ms'), (b'x-ratelimit-reset-tokens', b'2ms'), (b'x-request-id', b'f405aecf6c73168fcece4f167fa7a93f'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=x4MEhzj3rg3GGs2JLrsqliTlkm1aMbBflEKfed0SzYk-1718817362-1.0.1.1-scYAKPWaejWwjGu8zHY9ZbpOb2OeVPgQtLJgttWMJ318wU40gKtATbfN9vzF204_91pB7Gzz_1DD.hqlMBSvrA; path=/; expires=Wed, 19-Jun-24 17:46:02 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Set-Cookie', b'_cfuvid=JGeXQhBx3JhVJUJSi5uAH7UiHEQqm4yC9zcqeQuRGVg-1718817362117-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'89652a1df82d67bf-SJC'), (b'Content-Encoding', b'gzip'), (b'alt-svc', b'h3=\":443\"; ma=86400')])\n",
      "2024-06-19 10:16:02 - _client - INFO - [_client.py:1026:_send_single_request] - HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2024-06-19 10:16:02 - _trace - DEBUG - [_trace.py:45:trace] - receive_response_body.started request=<Request [b'POST']>\n",
      "2024-06-19 10:16:02 - _trace - DEBUG - [_trace.py:45:trace] - receive_response_body.complete\n",
      "2024-06-19 10:16:02 - _trace - DEBUG - [_trace.py:45:trace] - response_closed.started\n",
      "2024-06-19 10:16:02 - _trace - DEBUG - [_trace.py:45:trace] - response_closed.complete\n",
      "2024-06-19 10:16:02 - _base_client - DEBUG - [_base_client.py:988:_request] - HTTP Response: POST https://api.openai.com/v1/chat/completions \"200 OK\" Headers([('date', 'Wed, 19 Jun 2024 17:16:02 GMT'), ('content-type', 'application/json'), ('transfer-encoding', 'chunked'), ('connection', 'keep-alive'), ('openai-organization', 'sylphai'), ('openai-processing-ms', '311'), ('openai-version', '2020-10-01'), ('strict-transport-security', 'max-age=31536000; includeSubDomains'), ('x-ratelimit-limit-requests', '10000'), ('x-ratelimit-limit-tokens', '1000000'), ('x-ratelimit-remaining-requests', '9999'), ('x-ratelimit-remaining-tokens', '999952'), ('x-ratelimit-reset-requests', '6ms'), ('x-ratelimit-reset-tokens', '2ms'), ('x-request-id', 'f405aecf6c73168fcece4f167fa7a93f'), ('cf-cache-status', 'DYNAMIC'), ('set-cookie', '__cf_bm=x4MEhzj3rg3GGs2JLrsqliTlkm1aMbBflEKfed0SzYk-1718817362-1.0.1.1-scYAKPWaejWwjGu8zHY9ZbpOb2OeVPgQtLJgttWMJ318wU40gKtATbfN9vzF204_91pB7Gzz_1DD.hqlMBSvrA; path=/; expires=Wed, 19-Jun-24 17:46:02 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('set-cookie', '_cfuvid=JGeXQhBx3JhVJUJSi5uAH7UiHEQqm4yC9zcqeQuRGVg-1718817362117-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), ('server', 'cloudflare'), ('cf-ray', '89652a1df82d67bf-SJC'), ('content-encoding', 'gzip'), ('alt-svc', 'h3=\":443\"; ma=86400')])\n",
      "2024-06-19 10:16:02 - _base_client - DEBUG - [_base_client.py:996:_request] - request_id: f405aecf6c73168fcece4f167fa7a93f\n",
      "2024-06-19 10:16:02 - openai_client - DEBUG - [openai_client.py:134:parse_chat_completion] - completion: ChatCompletion(id='chatcmpl-9btCbdPdJz0rkB58Mu8GtuSsZIc6d', choices=[Choice(finish_reason='stop', index=0, logprobs=ChoiceLogprobs(content=[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-0.33268073, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-4.1273333e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00014191943, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-2.6968896e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-9.0883464e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=-4.723352e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-5.407367e-05, top_logprobs=[])]), message=ChatCompletionMessage(content='The capital of France is Paris.', role='assistant', function_call=None, tool_calls=None)), Choice(finish_reason='stop', index=1, logprobs=ChoiceLogprobs(content=[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-0.33268073, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-4.1273333e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00014191943, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-2.6968896e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-9.0883464e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=-4.723352e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-5.407367e-05, top_logprobs=[])]), message=ChatCompletionMessage(content='The capital of France is Paris.', role='assistant', function_call=None, tool_calls=None))], created=1718817361, model='gpt-3.5-turbo-0125', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=14, prompt_tokens=22, total_tokens=36))\n",
      "[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-0.33268073, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-4.1273333e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00014191943, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-2.6968896e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-9.0883464e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=-4.723352e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-5.407367e-05, top_logprobs=[])]\n",
      "[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-0.33268073, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-4.1273333e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00014191943, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-2.6968896e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-9.0883464e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=-4.723352e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-5.407367e-05, top_logprobs=[])]\n",
      "2024-06-19 10:16:02 - generator - INFO - [generator.py:233:call] - output: GeneratorOutput(data=[[TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)], [TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)]], error=None, usage=None, raw_response=[[TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)], [TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)]])\n",
      "GeneratorOutput(data=[[TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)], [TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)]], error=None, usage=None, raw_response=[[TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)], [TokenLogProb(token='The', logprob=-0.33268073), TokenLogProb(token=' capital', logprob=-4.1273333e-06), TokenLogProb(token=' of', logprob=-0.00014191943), TokenLogProb(token=' France', logprob=-2.6968896e-06), TokenLogProb(token=' is', logprob=-9.0883464e-07), TokenLogProb(token=' Paris', logprob=-4.723352e-06), TokenLogProb(token='.', logprob=-5.407367e-05)]])\n"
     ]
    }
   ],
   "source": [
    "from lightrag.core import Generator\n",
    "from lightrag.components.model_client import OpenAIClient, get_all_messages_content, get_probabilities\n",
    "from lightrag.utils import enable_library_logging\n",
    "\n",
    "enable_library_logging(level=\"DEBUG\")\n",
    "\n",
    "model_kwargs={\n",
    "    \"model\": \"gpt-3.5-turbo\",\n",
    "    \"logprobs\": True,\n",
    "    \"n\": 2, # the number of chat completion choices\n",
    "}\n",
    "model_client = OpenAIClient(chat_completion_parser=get_probabilities)\n",
    "generator = Generator(model_client=model_client, model_kwargs=model_kwargs)\n",
    "\n",
    "\n",
    "prompt_kwargs = {\n",
    "    \"input_str\": \"What is the capital of France?\",\n",
    "}\n",
    "output = generator(prompt_kwargs=prompt_kwargs)\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For reference, below is a raw debug log from `parse_chat_completion` (an earlier run) showing the per-token `logprobs` inside the `ChatCompletion` response; note that with `n=2` the second choice here returned only the single token `Paris`:\n",
    "\n",
    "2024-06-19 09:14:19 - openai_client - DEBUG - [openai_client.py:81:parse_chat_completion] - completion: ChatCompletion(id='chatcmpl-9bsEtR7IkwwKKuXao3hCFx210vThx', choices=[Choice(finish_reason='stop', index=0, logprobs=ChoiceLogprobs(content=[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-0.29857525, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-4.604148e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00016754455, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-3.0545007e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-2.220075e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=-4.00813e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-0.0001039008, top_logprobs=[])]), message=ChatCompletionMessage(content='The capital of France is Paris.', role='assistant', function_call=None, tool_calls=None)), Choice(finish_reason='stop', index=1, logprobs=ChoiceLogprobs(content=[ChatCompletionTokenLogprob(token='Paris', bytes=[80, 97, 114, 105, 115], logprob=-1.3551816, top_logprobs=[])]), message=ChatCompletionMessage(content='Paris', role='assistant', function_call=None, tool_calls=None))], created=1718813659, model='gpt-3.5-turbo-0125', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=8, prompt_tokens=22, total_tokens=30))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "GeneratorOutput(data='LightRAG is a light-based Real-time Anomaly Generator, which is a special type of anomaly detection system. It uses a combination of visual and statistical techniques to detect unusual patterns or outliers in a dataset in real-time, often for purposes such as identifying security threats, detecting fraud, or monitoring system performance. Would you like to know more about its applications or how it works?', error=None, usage=None, raw_response='LightRAG is a light-based Real-time Anomaly Generator, which is a special type of anomaly detection system. It uses a combination of visual and statistical techniques to detect unusual patterns or outliers in a dataset in real-time, often for purposes such as identifying security threats, detecting fraud, or monitoring system performance. Would you like to know more about its applications or how it works?')\n"
     ]
    }
   ],
   "source": [
    "from lightrag.core import Component, Generator, Prompt\n",
    "from lightrag.components.model_client import GroqAPIClient\n",
    "from lightrag.utils import setup_env # noqa\n",
    "\n",
    "\n",
    "class SimpleQA(Component):\n",
    "    \"\"\"Minimal QA component: wraps a Generator backed by Groq's llama3-8b-8192 model.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # Custom prompt template; {{input_str}} is filled from the prompt kwargs at call time.\n",
    "        template = r\"\"\"<SYS>\n",
    "        You are a helpful assistant.\n",
    "        </SYS>\n",
    "        User: {{input_str}}\n",
    "        You:\n",
    "        \"\"\"\n",
    "        self.generator = Generator(\n",
    "            model_client=GroqAPIClient(), model_kwargs={\"model\": \"llama3-8b-8192\"}, template=template\n",
    "        )\n",
    "\n",
    "    def call(self, query):\n",
    "        \"\"\"Synchronous QA: returns the generator's output for `query`.\"\"\"\n",
    "        return self.generator({\"input_str\": query})\n",
    "\n",
    "    async def acall(self, query):\n",
    "        \"\"\"Async QA: awaits the generator's acall with the same prompt kwargs.\"\"\"\n",
    "        return await self.generator.acall({\"input_str\": query})\n",
    "\n",
    "\n",
    "qa = SimpleQA()\n",
    "answer = qa(\"What is LightRAG?\")\n",
    "\n",
    "print(answer)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "lightrag-project",
   "language": "python",
   "name": "light-rag-project"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
