import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread


# Read the Hugging Face access token from the environment
HF_TOKEN = os.environ.get("HF_TOKEN", None)

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">CodeGemma</h1>

<p>This Space demonstrates the <a href="https://huggingface.co/google/codegemma-7b-it">CodeGemma-7b-it</a> model by Google. CodeGemma is a collection of lightweight open code models built on top of Gemma. Feel free to play with it, or duplicate it to run privately!</p>

<p>🔎 For more details about the CodeGemma release and how to use the models with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/codegemma">at our blog post</a>.</p>
</div>
'''

PLACEHOLDER = """
<div style="opacity: 0.65;">
    <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/7dd7659cff2eab51f0f5336f378edfca01dd16fa/gemma_lockup_vertical_full-color_rgb.png" style="width:30%;">
    <br><b>CodeGemma-7B-IT Chatbot</b>
</div>
"""

# Load the tokenizer and model
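# device_map="auto" lets accelerate place the model weights on the available GPU(s).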
tokenizer = AutoTokenizer.from_pretrained("google/codegemma-7b-it")
model = AutoModelForCausalLM.from_pretrained("google/codegemma-7b-it", device_map="auto")


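# Request a ZeroGPU slot for up to 120 seconds per call (Hugging Face Spaces).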
@spaces.GPU(duration=120)
def codegemma(message: str,
              history: list,
              temperature: float,
              max_new_tokens: int
             ):
    """
    Generate a streaming response using the CodeGemma model.
    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.
    Yields:
        str: The response text generated so far (streaming).
    """
    conversation = []
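    # history arrives from gr.ChatInterface as (user, assistant) message pairs.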
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

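    # Format the turns with Gemma's chat template and tokenize onto the model's device.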
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
    
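    # The streamer exposes generated tokens as an iterator; skip_prompt=True
    # drops the echoed input and skip_special_tokens removes control tokens.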
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
    )
    # Enforce greedy decoding (do_sample=False) when temperature is 0, since sampling with temperature 0 would crash.
    if temperature == 0:
        generate_kwargs['do_sample'] = False
        
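    # Run generation in a background thread so tokens can be consumed from the streamer here.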
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

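    # Yield the accumulated text so the UI renders a progressively growing reply.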
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
        

# Gradio block
chatbot = gr.Chatbot(placeholder=PLACEHOLDER, height=500)

with gr.Blocks(fill_height=True) as demo:
    
    gr.HTML(DESCRIPTION)
    
    gr.ChatInterface(
        fn=codegemma,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1,
                      step=0.1,
                      value=0.95,
                      label="Temperature",
                      render=False),
            gr.Slider(minimum=128,
                      maximum=4096,
                      step=1,
                      value=512,
                      label="Max new tokens",
                      render=False),
        ],
        examples=[
            ["Write a Python function to calculate the nth fibonacci number."]
        ],
        cache_examples=False,
    )
    
if __name__ == "__main__":
    demo.launch()