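# Gradio chat demo for the heegyu/bluechat-v0 text-generation model.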
import gradio as gr
import torch
from transformers import pipeline

# Load the chat model; use the GPU when one is available, otherwise the CPU.
generator = pipeline(
    "text-generation",
    model="heegyu/bluechat-v0",
    device="cuda:0" if torch.cuda.is_available() else "cpu",
)

def query(message, chat_history, max_turn=4):
    """Build a <usr>/<bot> prompt from the recent history and generate a reply."""
    # Keep only the last max_turn exchanges so the prompt stays short.
    if len(chat_history) > max_turn:
        chat_history = chat_history[-max_turn:]

    lines = []
    for user, bot in chat_history:
        lines.append(f"<usr> {user}")
        lines.append(f"<bot> {bot}")
    lines.append(f"<usr> {message}")
    prompt = "\n".join(lines) + "\n<bot>"

    output = generator(
        prompt,
        # repetition_penalty=1.3,
        # no_repeat_ngram_size=2,
        eos_token_id=2,  # newline token: stop at the end of the bot's line
        max_new_tokens=128,
        do_sample=True,
        top_p=0.9,
    )[0]['generated_text']

    print(output)  # log the full prompt + generation for debugging

    # The generated text includes the prompt, so keep only the new bot reply.
    response = output[len(prompt):]
    return response.strip()

with gr.Blocks() as demo:
    # Minimal chat UI: conversation display, message textbox, and a clear button.
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        bot_message = query(message, chat_history)
        chat_history.append((message, bot_message))
        # Clear the textbox and return the updated conversation to the Chatbot.
        return "", chat_history

    # Submitting the textbox sends a message; the Clear button empties the chat.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()
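
# For reference, the prompt that query() assembles looks like the sketch below.
# The example turns are hypothetical; only the <usr>/<bot> tags and the trailing
# "\n<bot>" cue come from query() above:
#
#   <usr> Hi there
#   <bot> Hello! How can I help?
#   <usr> What's the weather like today?
#   <bot>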