Chris4K committed on
Commit
c1aaeb5
·
verified ·
1 Parent(s): cc69ccc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -7
app.py CHANGED
@@ -664,7 +664,18 @@ def analyze_results(stats_df):
664
  ####
665
 
666
  def get_llm_suggested_settings(file, num_chunks=5):
667
- chunks, _, _ = process_files(file.name if file else None, 'HuggingFace', 'paraphrase-miniLM', 'recursive', 500, 50)
 
 
 
 
 
 
 
 
 
 
 
668
 
669
  # Select a few random chunks
670
  sample_chunks = random.sample(chunks, min(num_chunks, len(chunks)))
@@ -699,9 +710,9 @@ Provide your suggestions in a Python dictionary format."""
699
  suggested_settings = llm(prompt)
700
 
701
  # Parse the generated text to extract the dictionary
702
- # Note: This assumes the LLM generates a valid Python dictionary. In practice, you might need more robust parsing.
703
  try:
704
  settings_dict = eval(suggested_settings)
 
705
  return {
706
  "embedding_models": f"{settings_dict['embedding_model_type']}:{settings_dict['embedding_model_name']}",
707
  "split_strategy": settings_dict["split_strategy"],
@@ -718,12 +729,9 @@ Provide your suggestions in a Python dictionary format."""
718
  except:
719
  return {"error": "Failed to parse LLM suggestions"}
720
 
721
- return settings_dict
722
-
723
-
724
  def update_inputs_with_llm_suggestions(suggestions):
725
- if "error" in suggestions:
726
- return [gr.update() for _ in range(11)] # Return no updates if there's an error
727
 
728
  return [
729
  gr.update(value=[suggestions["embedding_models"]]), # embedding_models_input
 
664
  ####
665
 
666
  def get_llm_suggested_settings(file, num_chunks=5):
667
+ if not file:
668
+ return {"error": "No file uploaded"}
669
+
670
+ chunks, _, _ = process_files(
671
+ file.name,
672
+ 'HuggingFace',
673
+ 'paraphrase-miniLM',
674
+ 'recursive',
675
+ 500,
676
+ 50,
677
+ custom_separators=None
678
+ )
679
 
680
  # Select a few random chunks
681
  sample_chunks = random.sample(chunks, min(num_chunks, len(chunks)))
 
710
  suggested_settings = llm(prompt)
711
 
712
  # Parse the generated text to extract the dictionary
 
713
  try:
714
  settings_dict = eval(suggested_settings)
715
+ # Convert the settings to match the interface inputs
716
  return {
717
  "embedding_models": f"{settings_dict['embedding_model_type']}:{settings_dict['embedding_model_name']}",
718
  "split_strategy": settings_dict["split_strategy"],
 
729
  except:
730
  return {"error": "Failed to parse LLM suggestions"}
731
 
 
 
 
732
  def update_inputs_with_llm_suggestions(suggestions):
733
+ if suggestions is None or "error" in suggestions:
734
+ return [gr.update() for _ in range(11)] # Return no updates if there's an error or None
735
 
736
  return [
737
  gr.update(value=[suggestions["embedding_models"]]), # embedding_models_input