File size: 2,185 Bytes
049ffe6
ad8cb2c
 
 
 
abe4ee7
445af36
abe4ee7
 
 
 
ad8cb2c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a16de15
e5cf0cf
 
ad8cb2c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13bdf82
ad8cb2c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
import os
import time
# Fine-tuned Gemma-2 checkpoint served by this demo.
model_id = "alibidaran/Gemma2_Virtual_doctor"



# Downloaded from the Hugging Face Hub on first run (network required).
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" lets accelerate place the weights on GPU when available.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike event: message index, message text, liked flag."""
    event_fields = (x.index, x.value, x.liked)
    print(*event_fields)


def add_text(history, text):
    """Append the user's message to the chat history and lock the textbox.

    Args:
        history: list of [user_message, bot_message] chat pairs.
        text: the message just submitted by the user.

    Returns:
        (updated history, a cleared non-interactive Textbox update).

    The new entry is a *list*, not a tuple: ``bot()`` later assigns
    ``history[-1][1]`` in place while streaming, which requires a mutable
    entry rather than relying on gradio converting tuples for us.
    """
    history = history + [[text, None]]
    return history, gr.Textbox(value="", interactive=False)


def add_file(history, file):
    """Append an uploaded file to the chat history as a media message.

    Args:
        history: list of [user_message, bot_message] chat pairs.
        file: gradio upload object exposing a ``.name`` filesystem path.

    Returns:
        The updated history.

    Side effect: stores the upload path in the module-level ``image_file``
    global (not read anywhere in this file — presumably used elsewhere;
    TODO confirm).
    """
    global image_file
    image_file = file.name
    # List entry (not tuple) so bot() can mutate history[-1][1] in place;
    # the inner 1-tuple is gradio's "render this path as a file" convention.
    history = history + [[(file.name,), None]]
    return history


def bot(history):
    """Generate the model's reply to the latest user message and stream it
    into the chat history one character at a time (generator for gradio).

    Args:
        history: list of [user_message, bot_message] pairs; the last entry's
            bot slot is filled in incrementally.

    Yields:
        The history after each appended character, so the UI updates live.
    """
    prompt = history[-1][0]
    # Prompt template must match the fine-tuning format exactly — including
    # the "###Asistant" spelling. Do not "correct" it.
    text = f"<s> ###Human: {prompt} ###Asistant: "
    # Bug fix: send inputs to the model's actual device instead of the
    # hard-coded 'cpu' — with device_map="auto" the weights may be on GPU,
    # and a cpu/cuda mismatch raises at generate() time.
    inputs = tokenizer(text, return_tensors='pt').to(model.device)
    with torch.no_grad():
      outputs = model.generate(**inputs, max_new_tokens=120, do_sample=True,
                               top_p=0.92, top_k=10, temperature=0.7)
    # Keep only the newly generated tokens (drop the echoed prompt).
    output_text = outputs[:, inputs.input_ids.shape[1]:]
    response = tokenizer.decode(output_text[0], skip_special_tokens=True)
    print(response)
    history[-1][1] = ""
    # [1:-1] drops the first and last character of the decoded text —
    # presumably stray whitespace from decoding; TODO confirm.
    for character in response[1:-1]:
        history[-1][1] += character
        time.sleep(0.01)
        yield history


# UI layout and event wiring for the chat demo.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        #avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.png"))),
    )

    with gr.Row():
        # Main text input; container=False keeps it flush inside the row.
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Ask the virtual doctor about your symptoms!",
            container=False,
        )
        # Media upload button; NOTE(review): uploads feed bot(), which treats
        # history[-1][0] as a text prompt — a file tuple ends up formatted
        # into the prompt string. Confirm intended behavior.
        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

    # Submit: append the message and disable the textbox, then stream the
    # model's reply into the chatbot.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    # Re-enable the textbox once the streamed response finishes.
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
    # Upload: append the file message, then run the model on it.
    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

    # Log like/dislike feedback to stdout.
    chatbot.like(print_like_dislike, None, None)



if __name__=="__main__":
  # share=True requests a public gradio tunnel URL; debug=True blocks the
  # process and surfaces server errors in the console.
  demo.launch(share=True,debug=True)