ysharma committed
Commit 21c264c
1 Parent(s): a3a62eb

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -24,7 +24,7 @@ def codegemma(message: str, history: list, temperature: float, max_new_tokens: i
     Returns:
         str: The generated response.
     """
-    input_ids = tokenizer(message, return_tensors="pt")
+    input_ids = tokenizer(message, return_tensors="pt").to("cuda:0")
     outputs = model.generate(
         **input_ids,
         temperature=temperature,
@@ -35,13 +35,13 @@ def codegemma(message: str, history: list, temperature: float, max_new_tokens: i


 placeholder = """
-    <img src="https://huggingface.co/spaces/ysharma/CodeGemma/resolve/main/gemma_lockup_vertical_full-color_rgb.png" style="width:40%">
+    <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/7dd7659cff2eab51f0f5336f378edfca01dd16fa/gemma_lockup_vertical_full-color_rgb.png" style="width:40%">
     <b>CodeGemma-7B-IT</b>
 """

 # Gradio block
 with gr.Blocks(fill_height=True) as demo:
-    gr.Markdown("# GEMMA-7b-IT")
+    gr.Markdown("# CODEGEMMA-7b-IT")
     #with gr.Tab('CodeGemma Chatbot'):
     gr.ChatInterface(codegemma,
                      examples=[["Write a Python function to calculate the nth fibonacci number."]],
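The functional change here is the added `.to("cuda:0")`: the model sits on the GPU, so the tokenized inputs must be moved to the same device before `model.generate()` runs, otherwise PyTorch raises a device-mismatch error. A minimal sketch of that pattern, assuming the Space loads a CodeGemma-7B-IT checkpoint through the standard `transformers` API (the checkpoint name, dtype, and sampling values below are assumptions, not the Space's actual configuration):

```python
# Minimal sketch of the device-placement fix, not the Space's full app.py.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "google/codegemma-7b-it"  # assumed checkpoint for a CodeGemma-7B-IT demo

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).to("cuda:0")

message = "Write a Python function to calculate the nth fibonacci number."

# The tokenizer returns CPU tensors; .to("cuda:0") moves them to the GPU the
# model is on. Without it, model.generate() fails with a device-mismatch error.
input_ids = tokenizer(message, return_tensors="pt").to("cuda:0")

outputs = model.generate(
    **input_ids,
    temperature=0.7,        # illustrative values; the app exposes these as UI controls
    max_new_tokens=256,
    do_sample=True,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

An alternative to hard-coding the device index is loading the model with `device_map="auto"` and moving inputs to `model.device`.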
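The other two edits are cosmetic: the logo `<img>` URL in the placeholder HTML and the page heading (`GEMMA-7b-IT` becomes `CODEGEMMA-7b-IT`). For context, below is a hypothetical, stripped-down reconstruction of how that UI section plausibly fits together; the stub chat function and the wiring of the `placeholder` HTML into `gr.Chatbot(placeholder=...)` are assumptions, not the Space's actual code:

```python
# Hypothetical, self-contained sketch of the Gradio UI section; not the real app.py.
import gradio as gr

def codegemma(message: str, history: list, temperature: float = 0.7, max_new_tokens: int = 256) -> str:
    # Stub standing in for the real model.generate() call shown in the first hunk.
    return f"(stub reply) {message}"

placeholder = """
    <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/7dd7659cff2eab51f0f5336f378edfca01dd16fa/gemma_lockup_vertical_full-color_rgb.png" style="width:40%">
    <b>CodeGemma-7B-IT</b>
"""

# Gradio block
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("# CODEGEMMA-7b-IT")
    gr.ChatInterface(
        codegemma,
        chatbot=gr.Chatbot(placeholder=placeholder),  # assumed: HTML shown before the first message
        examples=[["Write a Python function to calculate the nth fibonacci number."]],
    )

if __name__ == "__main__":
    demo.launch()
```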