chenzihong committed
Commit 9797003 · 1 Parent(s): 3c5ab1e

fix: change exception type

Files changed (1)
  1. lightrag/llm.py +12 -12
lightrag/llm.py CHANGED
@@ -15,7 +15,7 @@ from openai import (
     AsyncOpenAI,
     APIConnectionError,
     RateLimitError,
-    Timeout,
+    APITimeoutError,
     AsyncAzureOpenAI,
 )
 from pydantic import BaseModel, Field
@@ -47,7 +47,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def openai_complete_if_cache(
     model,
@@ -108,7 +108,7 @@ async def openai_complete_if_cache(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APIConnectionError)),
 )
 async def azure_openai_complete_if_cache(
     model,
@@ -259,7 +259,7 @@ def initialize_hf_model(model_name):
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def hf_model_if_cache(
     model,
@@ -326,7 +326,7 @@ async def hf_model_if_cache(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def ollama_model_if_cache(
     model,
@@ -444,7 +444,7 @@ def initialize_lmdeploy_pipeline(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def lmdeploy_model_if_cache(
     model,
@@ -704,7 +704,7 @@ async def lollms_model_complete(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def zhipu_complete_if_cache(
     prompt: Union[str, List[Dict[str, str]]],
@@ -834,7 +834,7 @@ async def zhipu_complete(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=60),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def zhipu_embedding(
     texts: list[str], model: str = "embedding-3", api_key: str = None, **kwargs
@@ -870,7 +870,7 @@ async def zhipu_embedding(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=60),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def openai_embedding(
     texts: list[str],
@@ -928,7 +928,7 @@ async def jina_embedding(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=60),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def nvidia_openai_embedding(
     texts: list[str],
@@ -959,7 +959,7 @@ async def nvidia_openai_embedding(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def azure_openai_embedding(
     texts: list[str],
@@ -990,7 +990,7 @@ async def azure_openai_embedding(
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=60),
-    retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
+    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
 )
 async def siliconcloud_embedding(
     texts: list[str],
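
For reference, here is a minimal, self-contained sketch of the retry pattern this commit updates. It is not code from lightrag/llm.py; the `complete` coroutine and its body are hypothetical stand-ins. The motivation for the change is that the openai>=1.x SDK exposes `APITimeoutError` instead of the pre-1.0 `Timeout` name, so the tenacity decorator should retry on `APITimeoutError` alongside `RateLimitError` and `APIConnectionError`.

# Minimal sketch (assumes openai>=1.x and tenacity are installed);
# `complete` is a hypothetical stand-in, not a function from this repository.
from openai import AsyncOpenAI, APIConnectionError, RateLimitError, APITimeoutError
from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
)


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    # Only these transient failures trigger a retry; any other exception propagates.
    retry=retry_if_exception_type((RateLimitError, APIConnectionError, APITimeoutError)),
)
async def complete(model: str, prompt: str) -> str:
    client = AsyncOpenAI()
    response = await client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

Note that in the 1.x SDK `APITimeoutError` is a subclass of `APIConnectionError`, so listing both in the retry predicate is redundant but harmless.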