{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gradio as gr\n",
    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
    "from langchain.chains import LLMChain\n",
    "from langchain_community.llms import GPT4All\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain_core.prompts import (\n",
    "    ChatPromptTemplate,\n",
    "    MessagesPlaceholder,\n",
    "    SystemMessagePromptTemplate,\n",
    "    HumanMessagePromptTemplate,\n",
    ")\n",
    "\n",
    "def response(message, history):\n",
    "    # Path to your local GPT4All model\n",
    "    local_path = \"C:/Users/chris/AppData/Local/nomic.ai/GPT4All/Meta-Llama-3-8B-Instruct.Q4_0.gguf\"\n",
    "\n",
    "    # Callbacks support token-wise streaming\n",
    "    callbacks = [StreamingStdOutCallbackHandler()]\n",
    "\n",
    "    # Initialize the GPT4All model; the backend is determined by the .gguf file\n",
    "    llm = GPT4All(model=local_path, streaming=True, callbacks=callbacks, verbose=False)\n",
    "\n",
    "    prompt = ChatPromptTemplate(\n",
    "        messages=[\n",
    "            SystemMessagePromptTemplate.from_template(\n",
    "                \"You are a cute anime chatbot having a conversation with a human.\"\n",
    "            ),\n",
    "            # The `variable_name` here must match the memory_key below\n",
    "            MessagesPlaceholder(variable_name=\"history\"),\n",
    "            HumanMessagePromptTemplate.from_template(\"{question}\"),\n",
    "        ]\n",
    "    )\n",
    "\n",
    "    # Rebuild conversation memory from Gradio's history on each call,\n",
    "    # so the chain sees the full conversation instead of starting fresh\n",
    "    memory = ConversationBufferMemory(memory_key=\"history\", return_messages=True)\n",
    "    for human, assistant in history:\n",
    "        memory.chat_memory.add_user_message(human)\n",
    "        memory.chat_memory.add_ai_message(assistant)\n",
    "\n",
    "    # Create an LLMChain instance for the conversation\n",
    "    conversation = LLMChain(\n",
    "        llm=llm,\n",
    "        prompt=prompt,\n",
    "        verbose=False,\n",
    "        memory=memory,\n",
    "    )\n",
    "\n",
    "    # Generate a response using the question and the LLMChain\n",
    "    return conversation.invoke(input={\"question\": message})[\"text\"]\n",
    "\n",
    "gr.ChatInterface(response).launch()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Local Llama 3 Chatbot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Running on local URL: http://127.0.0.1:7919\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    }
   ],
   "source": [
    "import ollama\n",
    "import gradio as gr\n",
    "\n",
    "def chat(question, history):\n",
    "    # Convert Gradio's (user, assistant) pairs into Ollama's message format\n",
    "    history_format = []\n",
    "    for human, assistant in history:\n",
    "        history_format.append({\"role\": \"user\", \"content\": human})\n",
    "        history_format.append({\"role\": \"assistant\", \"content\": assistant})\n",
    "    history_format.append({\"role\": \"user\", \"content\": question})\n",
    "\n",
    "    # Single (non-streaming) completion from the local llama3 model\n",
    "    reply = ollama.chat(model=\"llama3\", messages=history_format)\n",
    "    return reply[\"message\"][\"content\"]\n",
    "\n",
    "# Gradio interface; gr.Chatbot's avatar_images takes (user, bot) images,\n",
    "# so the downloaded picture becomes the assistant's avatar\n",
    "assistant_avatar = \"C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg\"\n",
    "gr.ChatInterface(\n",
    "    fn=chat,\n",
    "    title=\"Chat Bot\",\n",
    "    chatbot=gr.Chatbot(height=300, avatar_images=(None, assistant_avatar)),\n",
    ").launch()"
   ]
  }
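  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A streaming variant of the cell above, as a minimal sketch: `ollama.chat` accepts `stream=True` and then returns an iterator of partial chunks, and `gr.ChatInterface` treats a generator function's successive `yield`s as the in-progress reply. The name `chat_stream` is illustrative, and this assumes the `llama3` model has already been pulled locally (`ollama pull llama3`)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ollama\n",
    "import gradio as gr\n",
    "\n",
    "def chat_stream(question, history):\n",
    "    # Same history conversion as in the cell above\n",
    "    messages = []\n",
    "    for human, assistant in history:\n",
    "        messages.append({\"role\": \"user\", \"content\": human})\n",
    "        messages.append({\"role\": \"assistant\", \"content\": assistant})\n",
    "    messages.append({\"role\": \"user\", \"content\": question})\n",
    "\n",
    "    # With stream=True, ollama.chat yields partial chunks; yielding the\n",
    "    # accumulated text lets Gradio update the reply token by token\n",
    "    partial = \"\"\n",
    "    for chunk in ollama.chat(model=\"llama3\", messages=messages, stream=True):\n",
    "        partial += chunk[\"message\"][\"content\"]\n",
    "        yield partial\n",
    "\n",
    "gr.ChatInterface(fn=chat_stream, title=\"Chat Bot (streaming)\", chatbot=gr.Chatbot(height=300)).launch()"
   ]
  }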
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 109, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import ollama\n", "import gradio as gr\n", "def chat(question, history):\n", " history_format = []\n", " for human, assistant in history:\n", " history_format.append({\"role\": \"user\", \"content\": human})\n", " history_format.append({\"role\": \"assistant\", \"content\":assistant})\n", " history_format.append({'role': 'user', 'content': question})\n", "\n", " messages=history_format\n", " stream = ollama.chat(model='llama3', messages=messages)\n", " #print(history_format)\n", " return stream['message']['content']\n", "\n", "# Gradio interface\n", "assistant_icon = gr.Image(value=\"C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg\", width=32, height=32) # adjust the size as needed\n", "assistant_img = gr.Image(value=\"C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg\", elem_id=\"assistant_img\")\n", "gr.ChatInterface(fn=chat, title=\"Chat Bot\",chatbot=gr.Chatbot(height=300)).launch()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.3" } }, "nbformat": 4, "nbformat_minor": 2 }