vincentclaes committed on
Commit
a59370d
1 Parent(s): 994c940

add examples

Browse files
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -122,7 +122,7 @@ g = gr.Interface(
122
  gr.components.Textbox(
123
  lines=2, label="FAQ", placeholder="Ask me anything about this website?"
124
  ),
125
- gr.components.Textbox(lines=2, label="Website URL", placeholder="https://www.meet-drift.ai/"),
126
  # gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
127
  # gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
128
  # gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
@@ -138,6 +138,11 @@ g = gr.Interface(
138
  )
139
  ],
140
  title="FAQ A Website",
 
 
 
 
 
141
  # description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
142
  )
143
  g.queue(concurrency_count=1)
 
122
  gr.components.Textbox(
123
  lines=2, label="FAQ", placeholder="Ask me anything about this website?"
124
  ),
125
+ gr.components.Textbox(lines=1, label="Website URL", placeholder="https://www.meet-drift.ai/"),
126
  # gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
127
  # gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
128
  # gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
 
138
  )
139
  ],
140
  title="FAQ A Website",
141
+ examples=[
142
+ ["Can you list the capabilities this company has in bullet points?", "https://www.meet-drift.ai/"],
143
+ ["What's the name of the founder?", "https://www.meet-drift.ai/about"],
144
+ ["in 1 word what's the service the company is providing?", "https://www.meet-drift.ai/"],
145
+ ]
146
  # description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
147
  )
148
  g.queue(concurrency_count=1)