yangdx committed on
Commit
a142be9
·
1 Parent(s): 43d6ad8

pre-commit run --all-files

Browse files
Files changed (1) hide show
  1. lightrag/api/lightrag_server.py +17 -9
lightrag/api/lightrag_server.py CHANGED
@@ -33,6 +33,7 @@ from dotenv import load_dotenv
33
 
34
  load_dotenv()
35
 
 
36
  def estimate_tokens(text: str) -> int:
37
  """Estimate the number of tokens in text
38
  Chinese characters: approximately 1.5 tokens per character
@@ -52,7 +53,7 @@ def estimate_tokens(text: str) -> int:
52
  LIGHTRAG_NAME = "lightrag"
53
  LIGHTRAG_TAG = "latest"
54
  LIGHTRAG_MODEL = "lightrag:latest"
55
- LIGHTRAG_SIZE = 7365960935 # it's a dummy value
56
  LIGHTRAG_CREATED_AT = "2024-01-15T00:00:00Z"
57
  LIGHTRAG_DIGEST = "sha256:lightrag"
58
 
@@ -242,7 +243,7 @@ def parse_args() -> argparse.Namespace:
242
  Returns:
243
  argparse.Namespace: Parsed arguments
244
  """
245
-
246
  parser = argparse.ArgumentParser(
247
  description="LightRAG FastAPI Server with separate working and input directories"
248
  )
@@ -587,9 +588,12 @@ def create_app(args):
587
  # Initialize document manager
588
  doc_manager = DocumentManager(args.input_dir)
589
 
590
-
591
  async def openai_alike_model_complete(
592
- prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 
 
 
 
593
  ) -> str:
594
  return await openai_complete_if_cache(
595
  args.llm_model,
@@ -602,7 +606,11 @@ def create_app(args):
602
  )
603
 
604
  async def azure_openai_model_complete(
605
- prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 
 
 
 
606
  ) -> str:
607
  return await azure_openai_complete_if_cache(
608
  args.llm_model,
@@ -642,12 +650,12 @@ def create_app(args):
642
  )
643
 
644
  # Initialize RAG
645
- if args.llm_binding in ["lollms", "ollama"] :
646
  rag = LightRAG(
647
  working_dir=args.working_dir,
648
  llm_model_func=lollms_model_complete
649
  if args.llm_binding == "lollms"
650
- else ollama_model_complete,
651
  llm_model_name=args.llm_model,
652
  llm_model_max_async=args.max_async,
653
  llm_model_max_token_size=args.max_tokens,
@@ -657,8 +665,8 @@ def create_app(args):
657
  "options": {"num_ctx": args.max_tokens},
658
  },
659
  embedding_func=embedding_func,
660
- )
661
- else :
662
  rag = LightRAG(
663
  working_dir=args.working_dir,
664
  llm_model_func=azure_openai_model_complete
 
33
 
34
  load_dotenv()
35
 
36
+
37
  def estimate_tokens(text: str) -> int:
38
  """Estimate the number of tokens in text
39
  Chinese characters: approximately 1.5 tokens per character
 
53
  LIGHTRAG_NAME = "lightrag"
54
  LIGHTRAG_TAG = "latest"
55
  LIGHTRAG_MODEL = "lightrag:latest"
56
+ LIGHTRAG_SIZE = 7365960935 # it's a dummy value
57
  LIGHTRAG_CREATED_AT = "2024-01-15T00:00:00Z"
58
  LIGHTRAG_DIGEST = "sha256:lightrag"
59
 
 
243
  Returns:
244
  argparse.Namespace: Parsed arguments
245
  """
246
+
247
  parser = argparse.ArgumentParser(
248
  description="LightRAG FastAPI Server with separate working and input directories"
249
  )
 
588
  # Initialize document manager
589
  doc_manager = DocumentManager(args.input_dir)
590
 
 
591
  async def openai_alike_model_complete(
592
+ prompt,
593
+ system_prompt=None,
594
+ history_messages=[],
595
+ keyword_extraction=False,
596
+ **kwargs,
597
  ) -> str:
598
  return await openai_complete_if_cache(
599
  args.llm_model,
 
606
  )
607
 
608
  async def azure_openai_model_complete(
609
+ prompt,
610
+ system_prompt=None,
611
+ history_messages=[],
612
+ keyword_extraction=False,
613
+ **kwargs,
614
  ) -> str:
615
  return await azure_openai_complete_if_cache(
616
  args.llm_model,
 
650
  )
651
 
652
  # Initialize RAG
653
+ if args.llm_binding in ["lollms", "ollama"]:
654
  rag = LightRAG(
655
  working_dir=args.working_dir,
656
  llm_model_func=lollms_model_complete
657
  if args.llm_binding == "lollms"
658
+ else ollama_model_complete,
659
  llm_model_name=args.llm_model,
660
  llm_model_max_async=args.max_async,
661
  llm_model_max_token_size=args.max_tokens,
 
665
  "options": {"num_ctx": args.max_tokens},
666
  },
667
  embedding_func=embedding_func,
668
+ )
669
+ else:
670
  rag = LightRAG(
671
  working_dir=args.working_dir,
672
  llm_model_func=azure_openai_model_complete