Gyr0MAN committed
Commit 2e818b4 • 1 Parent(s): ef991c1

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("ping98k/typhoon-7b-rag-instruct-th")
 model = AutoModelForCausalLM.from_pretrained("ping98k/typhoon-7b-rag-instruct-th", device_map={"": 0})
 
-@spaces.GPU(duration=120)
+'''@spaces.GPU(duration=120)
 def response(instruction, history, inputText):
     inp = f"""### Instruction:
 {instruction}
@@ -38,9 +38,9 @@ gr.ChatInterface(
     additional_inputs=[
         gr.Textbox(defaultInput, label="Input Text"),
     ],
-).launch()
+).launch()'''
 
-'''@spaces.GPU(duration=120)
+@spaces.GPU(duration=120)
 def response(inp):
     input_ids = tokenizer(inp, return_tensors='pt').to("cuda")
     beam_output = model.generate(**input_ids, max_new_tokens=300)
@@ -56,4 +56,4 @@ gradio_interface = gr.Interface(
     inputs = "text",
     outputs = "text"
 )
-gradio_interface.launch()'''
+gradio_interface.launch()
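
For context, a minimal sketch of the code path that is active after this commit: the @spaces.GPU-decorated response(inp) function served through a plain gr.Interface, while the earlier ChatInterface path is commented out. The decode/return step and the gradio_interface definition fall between the shown hunks, so those lines are assumptions rather than part of the diff.

import spaces
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model and tokenizer loading as shown in the first hunk's context lines.
tokenizer = AutoTokenizer.from_pretrained("ping98k/typhoon-7b-rag-instruct-th")
model = AutoModelForCausalLM.from_pretrained("ping98k/typhoon-7b-rag-instruct-th", device_map={"": 0})

@spaces.GPU(duration=120)
def response(inp):
    # Tokenize the prompt and move it to the GPU allocated for the Space.
    input_ids = tokenizer(inp, return_tensors='pt').to("cuda")
    beam_output = model.generate(**input_ids, max_new_tokens=300)
    # Assumed: decode the generated ids back to text (not visible in the diff).
    return tokenizer.decode(beam_output[0], skip_special_tokens=True)

# Assumed shape of the interface whose trailing arguments appear in the last hunk.
gradio_interface = gr.Interface(
    fn=response,
    inputs="text",
    outputs="text"
)
gradio_interface.launch()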