{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "9ea2530b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from pypdf import PdfReader\n",
    "name = 'Jongkook Kim'\n",
    "\n",
    "summary = ''\n",
    "with open('me/summary.txt', 'r', encoding='utf-8') as file:\n",
    "    summary = file.read()\n",
    "\n",
    "linkedin = ''\n",
    "linkedin_profile = PdfReader('me/Profile.pdf')\n",
    "for page in linkedin_profile.pages:\n",
    "    text = page.extract_text()\n",
    "    if text:\n",
    "        linkedin += text\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "97865f2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from dotenv import load_dotenv\n",
    "load_dotenv(override=True)\n",
    "from openai import OpenAI\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "d3468b60",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "from pydantic import BaseModel\n",
    "\n",
    "class Evaluation(BaseModel):\n",
    "    is_acceptable: bool\n",
    "    feedback: str\n",
    "    avator_response: str\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "6d0a7e9d",
   "metadata": {},
   "outputs": [],
   "source": [
    "avator_system_prompt = f\"You are acting as {name}. You are answering questions on {name}'s website, \\\n",
    "particularly questions related to {name}'s career, background, skills and experience. \\\n",
    "Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \\\n",
    "You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \\\n",
    "Be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n",
    "If you don't know the answer, say so.\"\n",
    "\n",
    "avator_system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n",
    "avator_system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\"\n",
    "\n",
    "def avator(user_question, history, evaluation: Evaluation): \n",
    "    system_prompt = ''\n",
    "    \n",
    "    if evaluation != None and not evaluation.is_acceptable:\n",
    "        print(f\"{evaluation.avator_response} is not acceptable. Retry\")\n",
    "        system_prompt = avator_system_prompt + \"\\n\\n## Previous answer rejected\\nYou just tried to reply, but the quality control rejected your reply\\n\"\n",
    "        system_prompt += f\"## Your attempted answer:\\n{evaluation.avator_response}\\n\\n\"\n",
    "        system_prompt += f\"## Reason for rejection:\\n{evaluation.feedback}\\n\\n\"\n",
    "    else:\n",
    "        system_prompt = avator_system_prompt\n",
    "\n",
    "    messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\":\"user\", \"content\": user_question}]\n",
    "\n",
    "    llm_client = OpenAI().chat.completions.create(\n",
    "        model='gpt-4o-mini',\n",
    "        messages=messages\n",
    "    )\n",
    "    \n",
    "    return llm_client.choices[0].message.content"
   ]
  },
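  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2c4e6a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick sanity check of the avatar in isolation (a sketch, not part of the\n",
    "# app itself): one made-up question, empty history, no prior evaluation.\n",
    "sample_reply = avatar('What kind of work do you do?', [], None)\n",
    "print(sample_reply)\n"
   ]
  },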
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "e353c3af",
   "metadata": {},
   "outputs": [],
   "source": [
    "evaluator_system_prompt = f\"You are an evaluator that decides whether a response to a question is acceptable. \\\n",
    "You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \\\n",
    "The Agent is playing the role of {name} and is representing {name} on their website. \\\n",
    "The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n",
    "The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:\"\n",
    "\n",
    "evaluator_system_prompt += f\"\\n\\n## Summary:\\n{summary}\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n",
    "evaluator_system_prompt += f\"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback.\"\n",
    "\n",
    "def evaluator_user_prompt(reply, message, history):\n",
    "    user_prompt = f\"Here's the conversation between the User and the Agent: \\n\\n{history}\\n\\n\"\n",
    "    user_prompt += f\"Here's the latest message from the User: \\n\\n{message}\\n\\n\"\n",
    "    user_prompt += f\"Here's the latest response from the Agent: \\n\\n{reply}\\n\\n\"\n",
    "    user_prompt += \"Please evaluate the response, replying with whether it is acceptable and your feedback.\"\n",
    "    return user_prompt\n",
    "\n",
    "def evaluator(user_question, avator_response, history) -> Evaluation:\n",
    "    messages = [{'role':'system', 'content': evaluator_system_prompt}] + [{'role':'user', 'content':evaluator_user_prompt(reply=avator_response, message=user_question, history=history)}]\n",
    "\n",
    "    llm_client = OpenAI(api_key=os.getenv('GOOGLE_API_KEY'), base_url='https://generativelanguage.googleapis.com/v1beta/openai/')\n",
    "    response = llm_client.beta.chat.completions.parse(model='gemini-2.0-flash',messages=messages,response_format=Evaluation)\n",
    "\n",
    "    evaluation = response.choices[0].message.parsed\n",
    "\n",
    "    evaluation.avator_response = avator_response\n",
    "\n",
    "    if 'xyz' in avator_response:\n",
    "        evaluation = Evaluation(is_acceptable=False, feedback=\"fake feedback\", avator_response='fake response')\n",
    "\n",
    "    return evaluation"
   ]
  },
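  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3d5f7a9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch of the evaluator on its own, scoring the sample reply from above.\n",
    "# The question and the empty history are made-up test inputs.\n",
    "sample_evaluation = evaluator(user_question='What kind of work do you do?', avatar_response=sample_reply, history=[])\n",
    "print(sample_evaluation.is_acceptable)\n",
    "print(sample_evaluation.feedback)\n"
   ]
  },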
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f34731b",
   "metadata": {},
   "outputs": [],
   "source": [
    "max_evaluate = 2\n",
    "def orchestrator(message, history):\n",
    "    avator_response = avator(message, history, None)\n",
    "    print('avator returns response')\n",
    "    for occurrence in range(1, max_evaluate+1):\n",
    "        print(f'try {occurrence}')\n",
    "        evaluation = evaluator(user_question=message, avator_response=avator_response, history=history)\n",
    "        print('evalautor returns evaluation')\n",
    "        if not evaluation.is_acceptable:\n",
    "            print('response from avator is not acceptable')\n",
    "            message_with_feedback = evaluation.feedback + message\n",
    "            avator_response = avator(message_with_feedback, history, evaluation)\n",
    "            print(f'get response from avator {occurrence} times')\n",
    "        else:\n",
    "            print(f'reponse from avator is acceptable in {occurrence} times')\n",
    "            break\n",
    "\n",
    "    \n",
    "    print('returning final response')\n",
    "    return avator_response\n"
   ]
  },
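  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d4e6a8b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# End-to-end smoke test of the full loop before launching the UI: a sketch\n",
    "# with a made-up question; at most MAX_RETRIES evaluator round-trips run.\n",
    "print(orchestrator('Tell me about your recent experience.', []))\n"
   ]
  },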
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ea996e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import gradio\n",
    "gradio.ChatInterface(orchestrator, type=\"messages\").launch()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
