ParisNeo commited on
Commit
2019ada
·
1 Parent(s): b44ccfd

Next test of timeout

Browse files
lightrag/api/lightrag_server.py CHANGED
@@ -101,12 +101,17 @@ def parse_args():
101
  help="Embedding model name (default: bge-m3:latest)",
102
  )
103
 
 
 
 
 
 
104
  parser.add_argument(
105
  "--timeout",
106
- default=300,
107
- help="Timeout is seconds (useful when using slow AI)",
 
108
  )
109
-
110
  # RAG configuration
111
  parser.add_argument(
112
  "--max-async", type=int, default=4, help="Maximum async operations (default: 4)"
 
101
  help="Embedding model name (default: bge-m3:latest)",
102
  )
103
 
104
def timeout_type(value):
    """argparse `type=` converter for --timeout.

    Returns None (infinite timeout) when the value is absent or the
    string "none" in any casing/with surrounding whitespace, otherwise
    converts to int. A non-numeric string raises ValueError, which
    argparse reports as an invalid argument.
    """
    if value is None:
        return None
    # Accept "None", "none", " NONE " etc. — users typing the sentinel
    # by hand should not crash on case or stray whitespace.
    if str(value).strip().lower() == "none":
        return None
    return int(value)
108
+
109
  parser.add_argument(
110
  "--timeout",
111
+ default=None,
112
+ type=timeout_type,
113
+ help="Timeout in seconds (useful when using slow AI). Use None for infinite timeout",
114
  )
 
115
  # RAG configuration
116
  parser.add_argument(
117
  "--max-async", type=int, default=4, help="Maximum async operations (default: 4)"
lightrag/llm.py CHANGED
@@ -407,11 +407,10 @@ async def lollms_model_if_cache(
407
  full_prompt += prompt
408
 
409
  request_data["prompt"] = full_prompt
410
- timeout = aiohttp.ClientTimeout(total=kwargs.get("timeout", 300)) # 300 seconds = 5 minutes
411
 
412
  async with aiohttp.ClientSession(timeout=timeout) as session:
413
  if stream:
414
-
415
  async def inner():
416
  async with session.post(
417
  f"{base_url}/lollms_generate", json=request_data
 
407
  full_prompt += prompt
408
 
409
  request_data["prompt"] = full_prompt
410
+ timeout = aiohttp.ClientTimeout(total=kwargs.get("timeout", None))
411
 
412
  async with aiohttp.ClientSession(timeout=timeout) as session:
413
  if stream:
 
414
  async def inner():
415
  async with session.post(
416
  f"{base_url}/lollms_generate", json=request_data