{ "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "#! pip install langchain" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "from langchain.chains import ConversationChain\n", "from dotenv import load_dotenv\n", "from langchain import OpenAI\n", "import os" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "load_dotenv()\n", "API_KEY = os.getenv('OPENAI_KEY')\n", "client = OpenAI()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Conversación con Conversation Buffer Window Memory" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "from langchain.chains.conversation.memory import ConversationBufferWindowMemory" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "carse = OpenAI(model_name = \"ft:gpt-3.5-turbo-1106:personal:carse:8U71tg31\",\n", " temperature = 0,\n", " model_kwargs={\"stop\": \"\\nHuman:\"})" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "window_memory = ConversationBufferWindowMemory(k=5)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "conversation = ConversationChain(\n", " llm = carse,\n", " verbose = True,\n", " memory = window_memory\n", ")" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n", "Prompt after formatting:\n", "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n", "\n", "Current conversation:\n", "Human: Hola amor, cómo dormiste hoy?\n", "AI: Hola! Dormí muy bien, gracias. Y tú?\n", "Human: Bien bien, la verdad no me quejo nada\n", "AI: Me alegra escuchar eso. Cómo estuvo tu día ayer?\n", "Human: hace mucho calooor\n", "AI:\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ "' Sí, el clima ha estado muy caliente últimamente. Dónde estabas?'" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "respuesta = input(\"Contesta aquí:\")\n", "conversation.predict(input=respuesta)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Human: Hola amor, cómo dormiste hoy?\n", "AI: Hola! Dormí muy bien, gracias. Y tú?\n", "Human: Bien bien, la verdad no me quejo nada\n", "AI: Me alegra escuchar eso. Cómo estuvo tu día ayer?\n", "Human: hace mucho calooor\n", "AI: Sí, el clima ha estado muy caliente últimamente. Dónde estabas?\n" ] } ], "source": [ "print(conversation.memory.buffer)" ] } ], "metadata": { "kernelspec": { "display_name": "base", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" } }, "nbformat": 4, "nbformat_minor": 2 }