Sleik committed on
Commit ae7d960
1 Parent(s): b9d6534

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +3 -9
  2. app.py +18 -0
  3. chatbot.ipynb +150 -0
  4. discollm.py +18 -0
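
The commit message indicates these files were pushed with `huggingface_hub`'s folder upload. For reference, a minimal sketch of how such a commit is typically produced; the local path and repo id are illustrative, not taken from the commit:

```python
from huggingface_hub import HfApi

api = HfApi()
# Uploads every file in the folder as one commit; the default commit
# message is "Upload folder using huggingface_hub", as seen above.
api.upload_folder(
    folder_path=".",               # local project folder (illustrative)
    repo_id="Sleik/echo-chatbot",  # hypothetical Space id
    repo_type="space",
)
```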
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Echo Chatbot
-emoji: 🏢
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 4.28.2
+title: echo-chatbot
 app_file: app.py
-pinned: false
+sdk: gradio
+sdk_version: 4.27.0
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,18 @@
+import ollama
+import gradio as gr
+def chat(question, history):
+    # Convert Gradio's (user, assistant) pairs into Ollama's message format
+    history_format = []
+    for human, assistant in history:
+        history_format.append({"role": "user", "content": human})
+        history_format.append({"role": "assistant", "content": assistant})
+    history_format.append({"role": "user", "content": question})
+
+    messages = history_format
+    response = ollama.chat(model='llama3', messages=messages)
+    return response['message']['content']
+
+# Gradio interface
+assistant_icon = gr.Image(value="C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg", width=32, height=32)  # adjust the size as needed
+assistant_img = gr.Image(value="C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg", elem_id="assistant_img")
+gr.ChatInterface(fn=chat, title="Chat Bot", chatbot=gr.Chatbot(height=300)).launch()
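
Note that `chat` returns the full completion in one shot. If token-by-token output is wanted, a minimal streaming sketch, assuming the `ollama` Python client's `stream=True` generator API and Gradio's support for generator chat functions (both present in current releases; verify against your installed versions):

```python
import ollama
import gradio as gr

def chat_stream(question, history):
    # Rebuild the conversation in Ollama's message format
    messages = []
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": question})

    # stream=True yields partial chunks instead of one final dict
    partial = ""
    for chunk in ollama.chat(model="llama3", messages=messages, stream=True):
        partial += chunk["message"]["content"]
        yield partial  # gr.ChatInterface re-renders each yielded string

gr.ChatInterface(fn=chat_stream, title="Chat Bot", chatbot=gr.Chatbot(height=300)).launch()
```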
chatbot.ipynb ADDED
@@ -0,0 +1,150 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import gradio as gr\n",
+    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+    "from langchain.chains import LLMChain\n",
+    "from langchain_community.llms import GPT4All\n",
+    "from langchain_core.prompts import PromptTemplate\n",
+    "from langchain.memory import ConversationBufferMemory\n",
+    "from langchain_core.prompts import (\n",
+    "    ChatPromptTemplate,\n",
+    "    MessagesPlaceholder,\n",
+    "    SystemMessagePromptTemplate,\n",
+    "    HumanMessagePromptTemplate,\n",
+    ")\n",
+    "\n",
+    "def response(input, history):\n",
+    "    # Path to your local GPT4All model\n",
+    "    local_path = \"C:/Users/chris/AppData/Local/nomic.ai/GPT4All/Meta-Llama-3-8B-Instruct.Q4_0.gguf\"\n",
+    "\n",
+    "    # Callbacks support token-wise streaming\n",
+    "    callbacks = [StreamingStdOutCallbackHandler()]\n",
+    "\n",
+    "    # Initialize GPT4All model\n",
+    "    llm = GPT4All(streaming=True, model=local_path, backend=\"gptj\", callbacks=callbacks, verbose=False)\n",
+    "\n",
+    "    prompt = ChatPromptTemplate(\n",
+    "        messages=[\n",
+    "            SystemMessagePromptTemplate.from_template(\n",
+    "                \"You are a cute anime chatbot having a conversation with a human.\"\n",
+    "            ),\n",
+    "            # The `variable_name` here is what must align with memory\n",
+    "            MessagesPlaceholder(variable_name=\"history\"),\n",
+    "            HumanMessagePromptTemplate.from_template(\"{question}\")\n",
+    "        ]\n",
+    "    )\n",
+    "\n",
+    "    # Initialize conversation memory\n",
+    "    memory = ConversationBufferMemory(memory_key=\"history\", return_messages=True)\n",
+    "    # memory = history\n",
+    "\n",
+    "    # Create an LLMChain instance for the conversation\n",
+    "    conversation = LLMChain(\n",
+    "        llm=llm,\n",
+    "        prompt=prompt,\n",
+    "        verbose=False,\n",
+    "        memory=memory\n",
+    "    )\n",
+    "\n",
+    "    # Generate a response using the input and the LLMChain\n",
+    "    response = conversation.invoke(input={\"question\": input})[\"text\"]\n",
+    "\n",
+    "    return response\n",
+    "\n",
+    "gr.ChatInterface(response).launch()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Local Llama 3 Chatbot"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 109,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Running on local URL: http://127.0.0.1:7919\n",
+      "\n",
+      "To create a public link, set `share=True` in `launch()`.\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       "<div><iframe src=\"http://127.0.0.1:7919/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": []
+     },
+     "execution_count": 109,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import ollama\n",
+    "import gradio as gr\n",
+    "def chat(question, history):\n",
+    "    # Convert Gradio's (user, assistant) pairs into Ollama's message format\n",
+    "    history_format = []\n",
+    "    for human, assistant in history:\n",
+    "        history_format.append({\"role\": \"user\", \"content\": human})\n",
+    "        history_format.append({\"role\": \"assistant\", \"content\": assistant})\n",
+    "    history_format.append({\"role\": \"user\", \"content\": question})\n",
+    "\n",
+    "    messages = history_format\n",
+    "    response = ollama.chat(model='llama3', messages=messages)\n",
+    "    return response['message']['content']\n",
+    "\n",
+    "# Gradio interface\n",
+    "assistant_icon = gr.Image(value=\"C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg\", width=32, height=32)  # adjust the size as needed\n",
+    "assistant_img = gr.Image(value=\"C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg\", elem_id=\"assistant_img\")\n",
+    "gr.ChatInterface(fn=chat, title=\"Chat Bot\", chatbot=gr.Chatbot(height=300)).launch()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
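
One caveat with the first cell: `GPT4All(...)` reloads the multi-gigabyte GGUF file on every message, and a fresh `ConversationBufferMemory` is created per call, so the chain never actually remembers earlier turns (Gradio's `history` argument is ignored). A minimal sketch of hoisting both out of the handler, assuming the same model path and the notebook's LangChain versions:

```python
import gradio as gr
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import GPT4All
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)

# Load the model and create the memory once, at import time
local_path = "C:/Users/chris/AppData/Local/nomic.ai/GPT4All/Meta-Llama-3-8B-Instruct.Q4_0.gguf"
llm = GPT4All(model=local_path, streaming=True, verbose=False)

prompt = ChatPromptTemplate(
    messages=[
        SystemMessagePromptTemplate.from_template(
            "You are a cute anime chatbot having a conversation with a human."
        ),
        MessagesPlaceholder(variable_name="history"),  # must match memory_key
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)
memory = ConversationBufferMemory(memory_key="history", return_messages=True)
conversation = LLMChain(llm=llm, prompt=prompt, memory=memory, verbose=False)

def response(question, history):
    # The shared memory object carries the running conversation
    return conversation.invoke(input={"question": question})["text"]

gr.ChatInterface(response).launch()
```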
discollm.py ADDED
@@ -0,0 +1,18 @@
+import ollama
+import gradio as gr
+def chat(question, history):
+    # Convert Gradio's (user, assistant) pairs into Ollama's message format
+    history_format = []
+    for human, assistant in history:
+        history_format.append({"role": "user", "content": human})
+        history_format.append({"role": "assistant", "content": assistant})
+    history_format.append({"role": "user", "content": question})
+
+    messages = history_format
+    response = ollama.chat(model='llama3', messages=messages)
+    return response['message']['content']
+
+# Gradio interface
+assistant_icon = gr.Image(value="C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg", width=32, height=32)  # adjust the size as needed
+assistant_img = gr.Image(value="C:/Users/chris/Downloads/japanese-lama-ghibli-artstyle.jpeg", elem_id="assistant_img")
+gr.ChatInterface(fn=chat, title="Chat Bot", chatbot=gr.Chatbot(height=300)).launch()
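
In both app.py and discollm.py, `assistant_icon` and `assistant_img` are created but never attached to the interface, so the hard-coded Windows paths have no effect. If the intent is a custom bot avatar, a sketch using `gr.Chatbot`'s `avatar_images` parameter (available in Gradio 4.x; the relative image path here is illustrative):

```python
import gradio as gr

# avatar_images takes (user_avatar, bot_avatar); None keeps the default
chatbot = gr.Chatbot(
    height=300,
    avatar_images=(None, "japanese-lama-ghibli-artstyle.jpeg"),  # hypothetical local file
)
gr.ChatInterface(fn=chat, title="Chat Bot", chatbot=chatbot).launch()
```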