# NOTE(review): the following lines are web-scrape residue from the hosting
# page (file-viewer chrome: status text, blame commit hashes, gutter line
# numbers) — not Python source. Kept as comments so the file parses.
# Spaces:
# Running
# Running
# File size: 2,790 Bytes
# 5a2de49 f5f3483 5a2de49 0317031 f5f3483 0317031 f5f3483 0317031 f5f3483 0317031 f5f3483 0317031 f5f3483 0cf10d2 0317031 f5f3483 0317031 f5f3483 0cf10d2 0317031 f5f3483 0317031 f5f3483 0317031 f5f3483 0317031 0cf10d2 0317031 0f8b456 0317031 f5f3483 0317031 5a2de49 0317031 5a2de49 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
import gradio as gr
from transformers import pipeline, Pipeline
from transformers import Conversation
def chatwith_blenderbot400m():
    """Run a short two-turn demo chat with facebook/blenderbot-400M-distill.

    Side effects only: prints the Conversation state to stdout after each
    turn; returns nothing.

    NOTE(review): the "conversational" pipeline task and the Conversation
    class were deprecated/removed in recent transformers releases — confirm
    the pinned transformers version supports them.
    """
    chat_pipe = pipeline(task="conversational", model="facebook/blenderbot-400M-distill")

    # First turn: seed the conversation with the opening user message.
    chat = Conversation("What are some fun activities I can do in the winter?")
    print(chat)
    print(type(chat))
    chat = chat_pipe(chat)
    print(chat)

    # Second turn: append a follow-up user message and run the model again.
    chat.add_message(
        {"role": "user", "content": "I would like to do outdoor activities. Which activities can I do?"})
    chat = chat_pipe(chat)
    print(chat)
def chatwith_qwen2_1point5b_instruct():
    """Two-turn text-generation demo against Qwen/Qwen2-1.5B-Instruct.

    Prints the growing chat transcript after each model call; returns
    nothing.

    NOTE(review): this definition is shadowed later in the file by another
    function with the same name but a (prompt, max_newtokens) signature —
    only the later one survives at import time.
    """
    gen = pipeline(task="text-generation", model="Qwen/Qwen2-1.5B-Instruct")

    # Turn 1: the pipeline returns the full transcript under "generated_text".
    transcript = [{"role": "user", "content": "What are some fun activities I can do in the winter?"}]
    transcript = gen(transcript, max_new_tokens=50)[0]["generated_text"]
    print(transcript)

    # Turn 2: extend the transcript with a follow-up and generate again.
    transcript.append({"role": "user", "content": "I would like to do outdoor activities. Which activities can I do?"})
    print(transcript)
    transcript = gen(transcript, max_new_tokens=50)[0]["generated_text"]
    print(transcript)
#chatwith_qwen2_1point5b_instruct()
def chatwith_qwen2_1point5b_instruct(prompt, max_newtokens):
    """Generate a reply to *prompt* with Qwen/Qwen2-1.5B-Instruct.

    Args:
        prompt: the user's message text.
        max_newtokens: generation budget passed as ``max_new_tokens``.

    Returns:
        The pipeline's "generated_text" value — the full chat transcript,
        including the original user message.
    """
    print("Aaaaa")  # debug marker left by the author
    gen = pipeline(task="text-generation", model="Qwen/Qwen2-1.5B-Instruct")
    chat = [{"role": "user", "content": prompt}]
    return gen(chat, max_new_tokens=max_newtokens)[0]["generated_text"]
# Module-level pipeline shared by chatbot_handler so the model loads once,
# not on every chat turn.
pipe = pipeline(task="text-generation", model="Qwen/Qwen2-1.5B-Instruct")
def chatbot_handler(user_message, history):
    """Gradio ChatInterface callback: reply to *user_message* given *history*.

    Assumes *history* is the tuples format — a list of
    (user_text, assistant_text) pairs — TODO confirm against the installed
    Gradio version (newer versions default to role/content dicts).

    Returns the assistant's reply text, or the fallback string if the model
    produced an empty transcript.
    """
    bot_response = "I don't think so"  # fallback if the transcript comes back empty

    # Rebuild the full role/content transcript the model expects,
    # then append the new user turn.
    messages = []
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_message})
    print(f"messages before sending to model {messages}")

    # The pipeline echoes the whole transcript back under "generated_text".
    messages = pipe(messages, max_new_tokens=512)[0]['generated_text']
    print(f"messages after sending to model{messages}")

    if messages:
        # The assistant's reply is the last entry of the transcript.
        print(f"the last message is: {messages[-1]}")
        bot_response = messages[-1]["content"]

    print(bot_response)
    return bot_response
# Wire the handler into a Gradio chat UI and serve it locally
# (share=False: no public tunnel link).
chatbot = gr.ChatInterface(chatbot_handler)
chatbot.launch(share=False)
# | (web-scrape residue — end of file)