gstaff committed on
Commit
75025ca
1 Parent(s): 0d6736b

Re-enable client now that Spaces servers are back online.

Files changed (1)
  1. app.py  +1 -16
app.py CHANGED
@@ -22,22 +22,7 @@ if not HF_TOKEN:
 API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 
-
-def configure_image_client():
-    url = r"http://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/"
-    try:
-        response = requests.get(url, params={"view": "api"}, allow_redirects=True)
-        response.raise_for_status()
-        content_text = response.text
-        pattern = r'"root":"(https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/.*?)"'
-        match = re.findall(pattern, content_text)[0]
-        return Client(match)
-    except requests.RequestException as e:
-        print(f"Error fetching URL content: {e}")
-        raise e
-
-
-# client = Client("https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/--replicas/0867lltlv/")
+client = Client("https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space")
 
 
 def init_speech_to_text_model() -> Pipeline:
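For context, a minimal sketch of how the re-enabled client is presumably consumed elsewhere in app.py, using the gradio_client API. The generate_image helper, the prompt argument, and the api_name value are assumptions for illustration; only the Client URL comes from this commit.

from gradio_client import Client

# Connect directly to the public Space URL, as re-enabled by this commit.
client = Client("https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space")

def generate_image(prompt: str):
    # Hypothetical helper: forwards the prompt to the Space's Gradio endpoint.
    # The endpoint name and argument list depend on the Space's actual API,
    # so treat this call as a placeholder.
    return client.predict(prompt, api_name="/predict")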