pandora-s committed on
Commit
1f5d9f7
·
verified ·
1 Parent(s): f529593

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -2
app.py CHANGED
@@ -6,7 +6,7 @@ from threading import Timer
6
 
7
  HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
8
  def get_available_free():
9
- models = InferenceClient().list_deployed_models("text-generation-inference")['text-generation']
10
  models_conclusion = {
11
  "Model": [],
12
  "API": [],
@@ -87,7 +87,21 @@ def color_status(api_value, cell_value):
87
  def search_models(query):
88
  return display_table(query)
89
 
90
- description = "This is a space that retrieves the status of all supported HF LLM Serverless Inference APIs.\nUpdates every 2 hours!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  with gr.Blocks() as demo:
92
  gr.Markdown("## HF Serverless LLM Inference API Status")
93
  gr.Markdown(description)
 
6
 
7
  HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
8
  def get_available_free():
9
+ models = InferenceClient(token=HUGGINGFACE_TOKEN).list_deployed_models("text-generation-inference")['text-generation']
10
  models_conclusion = {
11
  "Model": [],
12
  "API": [],
 
87
  def search_models(query):
88
  return display_table(query)
89
 
90
+ description = """
91
+ This is a space that retrieves the status of all supported HF LLM Serverless Inference APIs.
92
+ *Updates every 2 hours!*
93
+
94
+ If you are a student or you just want to quickly see what models are available to experiment for free, you are most likely highly interested on the free API huggingface provides... but like me, you struggle to find what models are available or not!
95
+ This is why I made this space that every 2 hours checks and updates the status of the list of LLMs that are in theory supported by retrieving the list in `InferenceClient().list_deployed_models("text-generation-inference")['text-generation']`.
96
+
97
+ So all you need is plug:
98
+ ```py
99
+ from huggingface_hub import InferenceClient
100
+ inf = InferenceClient(model = "MODEL", token = "TOKEN")
101
+ response = inf.text_generation("And play !!")
102
+ print(response)
103
+ ```
104
+ """
105
  with gr.Blocks() as demo:
106
  gr.Markdown("## HF Serverless LLM Inference API Status")
107
  gr.Markdown(description)