davila7 committed
Commit c92fa56
1 Parent(s): 3cca638
Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -12,8 +12,8 @@ print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 #pipe_biogpt = pipeline("text-generation", model="microsoft/BioGPT-Large", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
 pipe_flan_t5 = pipeline("text-generation", model="google/flan-t5-xxl", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
 #pipe_gpt2 = pipeline("text-generation", model="gpt2", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
-pipe_flan_ul2 = pipeline("text-generation", model="google/flan-ul2", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
-#pipe_galactica = pipeline("text-generation", model="facebook/galactica-1.3b", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
+#pipe_flan_ul2 = pipeline("text-generation", model="google/flan-ul2", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
+pipe_galactica = pipeline("text-generation", model="facebook/galactica-1.3b", device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
 
 title = "LLM vs LLM"
 description = "**Disclaimer:** this demo was made for research purposes."
@@ -22,14 +22,14 @@ def inference(text):
     #output_biogpt = pipe_biogpt(text, max_length=100)[0]["generated_text"]
     output_flan_t5 = pipe_flan_t5(text, max_length=100)[0]["generated_text"]
     #output_gpt2 = pipe_gpt2(text, max_length=100)[0]["generated_text"]
-    pipe_flan_ul2 = pipe_flan_t5(text, max_length=100)[0]["generated_text"]
-    #output_galactica = pipe_galactica(text, max_length=100)[0]["generated_text"]
+    #pipe_flan_ul2 = pipe_flan_t5(text, max_length=100)[0]["generated_text"]
+    output_galactica = pipe_galactica(text, max_length=100)[0]["generated_text"]
     return [
         #output_biogpt,
         output_flan_t5,
         #output_gpt2,
-        pipe_flan_ul2,
-        #output_galactica
+        #pipe_flan_ul2,
+        output_galactica
     ]
 
 io = gr.Interface(
@@ -39,8 +39,8 @@ io = gr.Interface(
         #gr.Textbox(lines=3, label="Microsoft: BioGPT-Large"),
         gr.Textbox(lines=3, label="Google: FLAN-T5-XXL"),
         #gr.Textbox(lines=3, label="GPT-2"),
-        gr.Textbox(lines=3, label="Google: FLAN-UL2"),
-        #gr.Textbox(lines=3, label="Facebook: Galactica 1.3B"),
+        #gr.Textbox(lines=3, label="Google: FLAN-UL2"),
+        gr.Textbox(lines=3, label="Facebook: Galactica 1.3B"),
     ],
     title=title,
     description=description,
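
For reference, below is a minimal sketch of app.py as it stands after this commit, assembled from the diff context. The imports, the Gradio input widget, and the launch call do not appear in the diff, so those parts are assumptions. One deliberate deviation: FLAN-T5 is a sequence-to-sequence model, so the sketch loads it under the "text2text-generation" task rather than the "text-generation" task the committed code passes. Note also that the removed line pipe_flan_ul2 = pipe_flan_t5(text, max_length=100)[0]["generated_text"] re-ran the FLAN-T5 pipeline instead of FLAN-UL2; this commit comments out that slip along with the rest of the FLAN-UL2 wiring.

# Sketch of the post-commit app.py; parts not shown in the diff are assumptions.
import torch
import gradio as gr
from transformers import pipeline

print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")

# FLAN-T5 is an encoder-decoder model, so "text2text-generation" is the
# matching pipeline task (the committed code passes "text-generation").
pipe_flan_t5 = pipeline(
    "text2text-generation",
    model="google/flan-t5-xxl",
    device="cuda:0",
    model_kwargs={"torch_dtype": torch.bfloat16},
)

# Galactica is a decoder-only (causal) LM, so "text-generation" fits as-is.
pipe_galactica = pipeline(
    "text-generation",
    model="facebook/galactica-1.3b",
    device="cuda:0",
    model_kwargs={"torch_dtype": torch.bfloat16},
)

title = "LLM vs LLM"
description = "**Disclaimer:** this demo was made for research purposes."

def inference(text):
    # Both pipelines return a list of dicts keyed by "generated_text".
    output_flan_t5 = pipe_flan_t5(text, max_length=100)[0]["generated_text"]
    output_galactica = pipe_galactica(text, max_length=100)[0]["generated_text"]
    return [output_flan_t5, output_galactica]

io = gr.Interface(
    fn=inference,
    inputs=gr.Textbox(lines=3, label="Prompt"),  # assumed: the input widget is outside the diff
    outputs=[
        gr.Textbox(lines=3, label="Google: FLAN-T5-XXL"),
        gr.Textbox(lines=3, label="Facebook: Galactica 1.3B"),
    ],
    title=title,
    description=description,
)
io.launch()

Returning a list whose length matches the outputs list is how Gradio maps the two generated strings onto the two output textboxes, which is why the commit toggles the return entries and the gr.Textbox labels in lockstep with the pipeline definitions.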