Abhaykoul commited on
Commit
dd6de00
·
verified ·
1 Parent(s): 99f7b0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -161
app.py CHANGED
@@ -1,28 +1,35 @@
1
  from fastapi import FastAPI, HTTPException, Query
2
- from fastapi.responses import JSONResponse
3
- from fastapi import FastAPI, HTTPException
4
- from fastapi.responses import StreamingResponse
5
- from webscout import WEBS, YTTranscriber, LLM, fastai
6
- from stream import fastai_stream
7
- from typing import Optional, List, Dict, Union
8
  from fastapi.encoders import jsonable_encoder
9
  from bs4 import BeautifulSoup
10
  import requests
11
- import urllib.parse
12
- import asyncio
13
  import aiohttp
 
14
  import threading
15
  import json
16
- import os
17
- import time
18
- from huggingface_hub import HfApi
19
  from huggingface_hub import InferenceClient
20
  from PIL import Image
21
  import io
22
- import ast
 
 
23
 
24
  app = FastAPI()
25
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  @app.get("/")
27
  async def root():
28
  return {"message": "API documentation can be found at /docs"}
@@ -39,11 +46,11 @@ async def search(
39
  safesearch: str = "moderate",
40
  region: str = "wt-wt",
41
  backend: str = "api",
42
- proxy: Optional[str] = None # Add proxy parameter here
43
  ):
44
  """Perform a text search."""
45
  try:
46
- with WEBS(proxy=proxy) as webs: # Pass proxy to WEBS instance
47
  results = webs.text(
48
  keywords=q,
49
  region=region,
@@ -68,11 +75,11 @@ async def images(
68
  type_image: Optional[str] = None,
69
  layout: Optional[str] = None,
70
  license_image: Optional[str] = None,
71
- proxy: Optional[str] = None # Add proxy parameter here
72
  ):
73
  """Perform an image search."""
74
  try:
75
- with WEBS(proxy=proxy) as webs: # Pass proxy to WEBS instance
76
  results = webs.images(
77
  keywords=q,
78
  region=region,
@@ -99,11 +106,11 @@ async def videos(
99
  resolution: Optional[str] = None,
100
  duration: Optional[str] = None,
101
  license_videos: Optional[str] = None,
102
- proxy: Optional[str] = None # Add proxy parameter here
103
  ):
104
  """Perform a video search."""
105
  try:
106
- with WEBS(proxy=proxy) as webs: # Pass proxy to WEBS instance
107
  results = webs.videos(
108
  keywords=q,
109
  region=region,
@@ -125,11 +132,11 @@ async def news(
125
  safesearch: str = "moderate",
126
  region: str = "wt-wt",
127
  timelimit: Optional[str] = None,
128
- proxy: Optional[str] = None # Add proxy parameter here
129
  ):
130
  """Perform a news search."""
131
  try:
132
- with WEBS(proxy=proxy) as webs: # Pass proxy to WEBS instance
133
  results = webs.news(
134
  keywords=q,
135
  region=region,
@@ -141,61 +148,6 @@ async def news(
141
  except Exception as e:
142
  raise HTTPException(status_code=500, detail=f"Error during news search: {e}")
143
 
144
- @app.get("/api/llm")
145
- async def llm_chat(
146
- model: str,
147
- message: str,
148
- system_prompt: str = Query(None, description="Optional custom system prompt")
149
- ):
150
- """Interact with a specified large language model with an optional system prompt."""
151
- try:
152
- messages = [{"role": "user", "content": message}]
153
- if system_prompt:
154
- messages.insert(0, {"role": "system", "content": system_prompt}) # Add system message at the beginning
155
-
156
- llm = LLM(model=model)
157
- response = llm.chat(messages=messages)
158
- return JSONResponse(content={"response": response})
159
- except Exception as e:
160
- raise HTTPException(status_code=500, detail=f"Error during LLM chat: {e}")
161
-
162
- @app.get("/api/fastAI")
163
- async def fast_ai(user: str, model: str = "llama3-70b", system: str = "Answer as concisely as possible."):
164
- """Get a response from the Snova AI service."""
165
- try:
166
- response = await asyncio.to_thread(fastai, user, model, system)
167
- return JSONResponse(content={"response": response})
168
- except Exception as e:
169
- raise HTTPException(status_code=500, detail=f"Error during Snova AI request: {e}")
170
-
171
- from pydantic import BaseModel
172
- import asyncio
173
-
174
- app = FastAPI()
175
-
176
- # Define a Pydantic model for the request payload
177
- class FastAIRequest(BaseModel):
178
- user: str
179
- model: str = "llama3-70b"
180
- system: str = "Answer as concisely as possible."
181
-
182
- @app.post("/api/fastAI-post")
183
- async def fast_ai(request: FastAIRequest):
184
- """Get a response from the Snova AI service."""
185
- try:
186
- response = await asyncio.to_thread(fastai, request.user, request.model, request.system)
187
- return JSONResponse(content={"response": response})
188
- except Exception as e:
189
- raise HTTPException(status_code=500, detail=f"Error during Snova AI request: {e}")
190
-
191
- @app.get("/api/streaming-fastAI")
192
- async def fast_ai(user: str, model: str = "llama3-8b", system: str = "Answer as concisely as possible."):
193
- """Get a streaming response from the Snova AI service."""
194
- try:
195
- return StreamingResponse(fastai_stream(user, model, system), media_type="text/event-stream")
196
- except Exception as e:
197
- raise HTTPException(status_code=500, detail=f"Error during Snova AI request: {e}")
198
-
199
  @app.get("/api/answers")
200
  async def answers(q: str, proxy: Optional[str] = None):
201
  """Get instant answers for a query."""
@@ -206,39 +158,85 @@ async def answers(q: str, proxy: Optional[str] = None):
206
  except Exception as e:
207
  raise HTTPException(status_code=500, detail=f"Error getting instant answers: {e}")
208
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  @app.get("/api/chat")
210
  async def chat(
211
  q: str,
212
  model: str = "gpt-4o-mini",
213
  proxy: Optional[str] = None
214
  ):
215
- """Perform a text search."""
216
  try:
217
  with WEBS(proxy=proxy) as webs:
218
- results = webs.chat(chat_messages=[{"role": "user", "content": q}], model=model)
219
  return JSONResponse(content=jsonable_encoder(results))
220
  except Exception as e:
221
  raise HTTPException(status_code=500, detail=f"Error getting chat results: {e}")
222
 
223
- # Define a Pydantic model for the request payload
224
- class ChatRequest(BaseModel):
225
- q: str
226
- model: str = "gpt-4o-mini"
227
- history: List[Dict[str, str]] = []
228
- proxy: Optional[str] = None
229
-
230
  @app.post("/api/chat-post")
231
- async def chat(request: ChatRequest):
232
- """Perform a text search."""
233
  try:
234
  with WEBS(proxy=request.proxy) as webs:
235
- chat_messages = request.history
236
- chat_messages.append({"role": "user", "content": request.q})
237
- results = webs.chat(chat_messages = chat_messages, model=request.model)
238
  return JSONResponse(content=jsonable_encoder(results))
239
  except Exception as e:
240
  raise HTTPException(status_code=500, detail=f"Error getting chat results: {e}")
241
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
  def extract_text_from_webpage(html_content):
243
  """Extracts visible text from HTML content using BeautifulSoup."""
244
  soup = BeautifulSoup(html_content, "html.parser")
@@ -379,7 +377,7 @@ def web_search_and_extract_threading(
379
  @app.get("/api/adv_web_search")
380
  async def adv_web_search(
381
  q: str,
382
- model: str = "llama3-8b",
383
  max_results: int = 3,
384
  timelimit: Optional[str] = None,
385
  safesearch: str = "moderate",
@@ -390,7 +388,7 @@ async def adv_web_search(
390
  proxy: Optional[str] = None
391
  ):
392
  """
393
- Combines web search, web extraction, and FastAI chat for advanced search.
394
  """
395
  try:
396
  with WEBS(proxy=proxy) as webs:
@@ -408,7 +406,7 @@ async def adv_web_search(
408
  if result['text']:
409
  extracted_text += f"## Content from: {result['link']}\n\n{result['text']}\n\n"
410
 
411
- # 3. Construct the prompt for FastAI
412
  ai_prompt = (
413
  f"User Query: {q}\n\n"
414
  f"Please provide a detailed and accurate answer to the user's query. Include relevant information extracted from the search results below. Ensure to cite sources by providing links to the original content where applicable. Format your response as follows:\n\n"
@@ -418,11 +416,9 @@ async def adv_web_search(
418
  f"Search Results:\n{extracted_text}"
419
  )
420
 
421
- # 4. Get the FastAI's response using FastAI service
422
- try:
423
- response = await asyncio.to_thread(fastai, ai_prompt, model=model, system=system_prompt)
424
- except Exception as e:
425
- raise HTTPException(status_code=500, detail=f"Error during FastAI request: {e}")
426
 
427
  # 5. Return the results
428
  return JSONResponse(content={"response": response})
@@ -485,47 +481,23 @@ async def ask_website(url: str, question: str, model: str = "llama-3-70b", proxy
485
  except Exception as e:
486
  raise HTTPException(status_code=500, detail=f"Error during question answering: {e}")
487
 
 
488
  client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
489
 
490
  @app.get("/api/sd3")
491
- def sd3(prompt :str = "",
492
- steps: int = 20,
493
- width: int = 1000,
494
- height: int = 1000
495
- ):
496
  try:
497
- image = client_sd3.text_to_image(prompt = f"{prompt}, hd, high quality, 4k, masterpiece",
498
- num_inference_steps = steps,
499
- width = width, height = height )
 
 
 
500
  image = Image.open(io.BytesIO(image))
501
- return image
502
  except Exception as e:
503
  raise HTTPException(status_code=500, detail=f"Error during image generation: {e}")
504
 
505
- @app.get("/api/maps")
506
- async def maps(
507
- q: str,
508
- place: Optional[str] = None,
509
- street: Optional[str] = None,
510
- city: Optional[str] = None,
511
- county: Optional[str] = None,
512
- state: Optional[str] = None,
513
- country: Optional[str] = None,
514
- postalcode: Optional[str] = None,
515
- latitude: Optional[str] = None,
516
- longitude: Optional[str] = None,
517
- radius: int = 0,
518
- max_results: int = 10,
519
- proxy: Optional[str] = None
520
- ):
521
- """Perform a maps search."""
522
- try:
523
- with WEBS(proxy=proxy) as webs:
524
- results = webs.maps(keywords=q, place=place, street=street, city=city, county=county, state=state, country=country, postalcode=postalcode, latitude=latitude, longitude=longitude, radius=radius, max_results=max_results)
525
- return JSONResponse(content=jsonable_encoder(results))
526
- except Exception as e:
527
- raise HTTPException(status_code=500, detail=f"Error during maps search: {e}")
528
-
529
  @app.get("/api/translate")
530
  async def translate(
531
  q: str,
@@ -541,16 +513,14 @@ async def translate(
541
  except Exception as e:
542
  raise HTTPException(status_code=500, detail=f"Error during translation: {e}")
543
 
544
- from easygoogletranslate import EasyGoogleTranslate
545
-
546
  @app.get("/api/google_translate")
547
  def google_translate(q: str, from_: Optional[str] = 'auto', to: str = "en"):
548
  try:
549
  translator = EasyGoogleTranslate(
550
- source_language=from_,
551
- target_language=to,
552
- timeout=10
553
- )
554
  result = translator.translate(q)
555
  return JSONResponse(content=jsonable_encoder({"detected_language": from_ , "original": q , "translated": result}))
556
  except Exception as e:
@@ -570,7 +540,6 @@ async def youtube_transcript(
570
  except Exception as e:
571
  raise HTTPException(status_code=500, detail=f"Error getting YouTube transcript: {e}")
572
 
573
- import requests
574
  @app.get("/weather/json/{location}")
575
  def get_weather_json(location: str):
576
  url = f"https://wttr.in/{location}?format=j1"
@@ -589,29 +558,6 @@ def get_ascii_weather(location: str):
589
  else:
590
  return {"error": f"Unable to fetch weather data. Status code: {response.status_code}"}
591
 
592
- # Run the API server if this script is executed
593
  if __name__ == "__main__":
594
  import uvicorn
595
- uvicorn.run(app, host="0.0.0.0", port=8083)
596
-
597
- # def main():
598
- # # Retrieve the space ID and token from environment variables
599
- # space_id = os.getenv("SPACE_ID")
600
- # token = os.getenv("HF_TOKEN")
601
-
602
- # # Initialize the HfApi with the retrieved token
603
- # api = HfApi(token=token)
604
-
605
- # while True:
606
- # try:
607
- # # Restart the space
608
- # api.restart_space(space_id, factory_reboot=False)
609
- # print(f"Successfully restarted the space: {space_id}")
610
- # except Exception as e:
611
- # print(f"Error restarting the space: {e}")
612
-
613
- # # Wait for 10 minutes before restarting again
614
- # time.sleep(600) # Sleep for 600 seconds (10 minutes)
615
-
616
- # if __name__ == "__main__":
617
- # main()
 
1
  from fastapi import FastAPI, HTTPException, Query
2
+ from fastapi.responses import JSONResponse, StreamingResponse
3
+ from webscout import WEBS, YTTranscriber, LLM
4
+ from typing import Optional, List, Dict
 
 
 
5
  from fastapi.encoders import jsonable_encoder
6
  from bs4 import BeautifulSoup
7
  import requests
 
 
8
  import aiohttp
9
+ import asyncio
10
  import threading
11
  import json
 
 
 
12
  from huggingface_hub import InferenceClient
13
  from PIL import Image
14
  import io
15
+ from easygoogletranslate import EasyGoogleTranslate
16
+
17
+ from pydantic import BaseModel
18
 
19
  app = FastAPI()
20
 
21
# Request payloads for the POST chat/AI endpoints.
class ChatRequest(BaseModel):
    """Payload for /api/chat-post: query text, optional history and proxy."""
    q: str
    model: str = "gpt-4o-mini"
    history: List[Dict[str, str]] = []  # pydantic copies this default per instance
    proxy: Optional[str] = None


class AIRequest(BaseModel):
    """Payload for /api/ai-post: user message plus model and system prompt."""
    user: str
    model: str = "llama3-70b"
    system: str = "Answer as concisely as possible."
32
+
33
@app.get("/")
async def root():
    """Landing route: direct callers to the interactive API docs."""
    return {"message": "API documentation can be found at /docs"}
 
46
  safesearch: str = "moderate",
47
  region: str = "wt-wt",
48
  backend: str = "api",
49
+ proxy: Optional[str] = None
50
  ):
51
  """Perform a text search."""
52
  try:
53
+ with WEBS(proxy=proxy) as webs:
54
  results = webs.text(
55
  keywords=q,
56
  region=region,
 
75
  type_image: Optional[str] = None,
76
  layout: Optional[str] = None,
77
  license_image: Optional[str] = None,
78
+ proxy: Optional[str] = None
79
  ):
80
  """Perform an image search."""
81
  try:
82
+ with WEBS(proxy=proxy) as webs:
83
  results = webs.images(
84
  keywords=q,
85
  region=region,
 
106
  resolution: Optional[str] = None,
107
  duration: Optional[str] = None,
108
  license_videos: Optional[str] = None,
109
+ proxy: Optional[str] = None
110
  ):
111
  """Perform a video search."""
112
  try:
113
+ with WEBS(proxy=proxy) as webs:
114
  results = webs.videos(
115
  keywords=q,
116
  region=region,
 
132
  safesearch: str = "moderate",
133
  region: str = "wt-wt",
134
  timelimit: Optional[str] = None,
135
+ proxy: Optional[str] = None
136
  ):
137
  """Perform a news search."""
138
  try:
139
+ with WEBS(proxy=proxy) as webs:
140
  results = webs.news(
141
  keywords=q,
142
  region=region,
 
148
  except Exception as e:
149
  raise HTTPException(status_code=500, detail=f"Error during news search: {e}")
150
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
  @app.get("/api/answers")
152
  async def answers(q: str, proxy: Optional[str] = None):
153
  """Get instant answers for a query."""
 
158
  except Exception as e:
159
  raise HTTPException(status_code=500, detail=f"Error getting instant answers: {e}")
160
 
161
@app.get("/api/maps")
async def maps(
    q: str,
    place: Optional[str] = None,
    street: Optional[str] = None,
    city: Optional[str] = None,
    county: Optional[str] = None,
    state: Optional[str] = None,
    country: Optional[str] = None,
    postalcode: Optional[str] = None,
    latitude: Optional[str] = None,
    longitude: Optional[str] = None,
    radius: int = 0,
    max_results: int = 10,
    proxy: Optional[str] = None
):
    """Perform a maps search."""
    # Collect every location filter and forward it unchanged to the backend.
    search_kwargs = dict(
        keywords=q, place=place, street=street, city=city, county=county,
        state=state, country=country, postalcode=postalcode,
        latitude=latitude, longitude=longitude,
        radius=radius, max_results=max_results,
    )
    try:
        with WEBS(proxy=proxy) as webs:
            results = webs.maps(**search_kwargs)
            return JSONResponse(content=jsonable_encoder(results))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during maps search: {e}")
184
+
185
@app.get("/api/chat")
async def chat(
    q: str,
    model: str = "gpt-4o-mini",
    proxy: Optional[str] = None
):
    """Interact with a specified large language model."""
    try:
        with WEBS(proxy=proxy) as webs:
            reply = webs.chat(keywords=q, model=model)
            return JSONResponse(content=jsonable_encoder(reply))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error getting chat results: {e}")
  raise HTTPException(status_code=500, detail=f"Error getting chat results: {e}")
198
 
 
 
 
 
 
 
 
199
@app.post("/api/chat-post")
async def chat_post(request: ChatRequest):
    """Interact with a specified large language model with chat history."""
    try:
        with WEBS(proxy=request.proxy) as webs:
            reply = webs.chat(
                keywords=request.q,
                model=request.model,
                chat_messages=request.history,
            )
            return JSONResponse(content=jsonable_encoder(reply))
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error getting chat results: {e}")
208
 
209
@app.get("/api/llm")
async def llm_chat(
    model: str,
    message: str,
    system_prompt: str = Query(None, description="Optional custom system prompt")
):
    """Interact with a specified large language model with an optional system prompt."""
    try:
        # Build the conversation in order: system prompt (if any), then user turn.
        conversation = []
        if system_prompt:
            conversation.append({"role": "system", "content": system_prompt})
        conversation.append({"role": "user", "content": message})
        response = LLM(model=model).chat(messages=conversation)
        return JSONResponse(content={"response": response})
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during LLM chat: {e}")
226
+
227
@app.post("/api/ai-post")
async def ai_post(request: AIRequest):
    """Interact with a specified large language model (using AIRequest model)."""
    try:
        conversation = [
            {"role": "system", "content": request.system},
            {"role": "user", "content": request.user},
        ]
        llm = LLM(model=request.model)
        return JSONResponse(content={"response": llm.chat(messages=conversation)})
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during AI request: {e}")
239
+
240
  def extract_text_from_webpage(html_content):
241
  """Extracts visible text from HTML content using BeautifulSoup."""
242
  soup = BeautifulSoup(html_content, "html.parser")
 
377
  @app.get("/api/adv_web_search")
378
  async def adv_web_search(
379
  q: str,
380
+ model: str = "gpt-4o-mini", # Use webs.chat by default
381
  max_results: int = 3,
382
  timelimit: Optional[str] = None,
383
  safesearch: str = "moderate",
 
388
  proxy: Optional[str] = None
389
  ):
390
  """
391
+ Combines web search, web extraction, and chat model for advanced search.
392
  """
393
  try:
394
  with WEBS(proxy=proxy) as webs:
 
406
  if result['text']:
407
  extracted_text += f"## Content from: {result['link']}\n\n{result['text']}\n\n"
408
 
409
+ # 3. Construct the prompt for the chat model
410
  ai_prompt = (
411
  f"User Query: {q}\n\n"
412
  f"Please provide a detailed and accurate answer to the user's query. Include relevant information extracted from the search results below. Ensure to cite sources by providing links to the original content where applicable. Format your response as follows:\n\n"
 
416
  f"Search Results:\n{extracted_text}"
417
  )
418
 
419
+ # 4. Get the chat model's response using webs.chat
420
+ with WEBS(proxy=proxy) as webs:
421
+ response = webs.chat(keywords=ai_prompt, model=model)
 
 
422
 
423
  # 5. Return the results
424
  return JSONResponse(content={"response": response})
 
481
  except Exception as e:
482
  raise HTTPException(status_code=500, detail=f"Error during question answering: {e}")
483
 
484
+ # Stable Diffusion client
485
  client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
486
 
487
@app.get("/api/sd3")
def sd3(prompt: str = "", steps: int = 20, width: int = 1000, height: int = 1000):
    """Generate an image with Stable Diffusion 3 and return it as a PNG.

    Args:
        prompt: User prompt; quality boosters are appended automatically.
        steps: Number of diffusion inference steps.
        width/height: Output dimensions in pixels.

    Raises:
        HTTPException: 500 with the underlying error on any failure.
    """
    try:
        # text_to_image returns a PIL.Image.Image, not raw bytes — the previous
        # Image.open(io.BytesIO(image)) call raised TypeError on every request,
        # and a bare PIL image is not a valid FastAPI response either.
        image = client_sd3.text_to_image(
            prompt=f"{prompt}, hd, high quality, 4k, masterpiece",
            num_inference_steps=steps,
            width=width,
            height=height,
        )
        buf = io.BytesIO()
        image.save(buf, format="PNG")
        buf.seek(0)
        return StreamingResponse(buf, media_type="image/png")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during image generation: {e}")
500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
501
  @app.get("/api/translate")
502
  async def translate(
503
  q: str,
 
513
  except Exception as e:
514
  raise HTTPException(status_code=500, detail=f"Error during translation: {e}")
515
 
 
 
516
  @app.get("/api/google_translate")
517
  def google_translate(q: str, from_: Optional[str] = 'auto', to: str = "en"):
518
  try:
519
  translator = EasyGoogleTranslate(
520
+ source_language=from_,
521
+ target_language=to,
522
+ timeout=10
523
+ )
524
  result = translator.translate(q)
525
  return JSONResponse(content=jsonable_encoder({"detected_language": from_ , "original": q , "translated": result}))
526
  except Exception as e:
 
540
  except Exception as e:
541
  raise HTTPException(status_code=500, detail=f"Error getting YouTube transcript: {e}")
542
 
 
543
  @app.get("/weather/json/{location}")
544
  def get_weather_json(location: str):
545
  url = f"https://wttr.in/{location}?format=j1"
 
558
  else:
559
  return {"error": f"Unable to fetch weather data. Status code: {response.status_code}"}
560
 
 
561
# Launch a local development server when this module is run as a script.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8083)