ysharma HF staff committed on
Commit
389e675
1 Parent(s): 21c264c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -8,9 +8,9 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
8
 
9
  # Load the tokenizer and model
10
  tokenizer = GemmaTokenizer.from_pretrained("google/codegemma-7b-it")
11
- model = AutoModelForCausalLM.from_pretrained("google/codegemma-7b-it", device_map="auto") #.to("cuda:0")
12
-
13
 
 
14
  def codegemma(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
15
  """
16
  Generate a response using the CodeGemma model.
@@ -46,6 +46,7 @@ with gr.Blocks(fill_height=True) as demo:
46
  gr.ChatInterface(codegemma,
47
  examples=[["Write a Python function to calculate the nth fibonacci number."]],
48
  fill_height=True,
 
49
  additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
50
  additional_inputs=[
51
  gr.Slider(0, 1, 0.95, label="Temperature", render=False),
 
8
 
9
  # Load the tokenizer and model
10
  tokenizer = GemmaTokenizer.from_pretrained("google/codegemma-7b-it")
11
+ model = AutoModelForCausalLM.from_pretrained("google/codegemma-7b-it", device_map="auto")
 
12
 
13
+ @spaces.GPU(duration=120)
14
  def codegemma(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
15
  """
16
  Generate a response using the CodeGemma model.
 
46
  gr.ChatInterface(codegemma,
47
  examples=[["Write a Python function to calculate the nth fibonacci number."]],
48
  fill_height=True,
49
+ placeholder=placeholder,
50
  additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
51
  additional_inputs=[
52
  gr.Slider(0, 1, 0.95, label="Temperature", render=False),