yxmauw committed on
Commit
ea90c92
1 Parent(s): a64eb0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -1
app.py CHANGED
@@ -14,11 +14,32 @@ def model_choices():
14
 
15
# Lookup table: model filename -> human-readable description (built from data_json).
model_description = {
    entry['filename']: entry['description'] for entry in data_json
}
16
 
 
17
def llm_intro(selected_model):
    """Return the description text for *selected_model*, or a fallback note."""
    fallback = "No description available for this model selection."
    return model_description.get(selected_model, fallback)
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
def generate_text(input_text, selected_model):
    """Generate up to 100 tokens of completion for *input_text*.

    A fresh GPT4All instance is constructed for the selected model on
    every call, then asked for a bounded completion.
    """
    llm = GPT4All(selected_model)
    return llm.generate(input_text, max_tokens=100)
24
 
 
14
 
15
# Lookup table: model filename -> human-readable description (built from data_json).
model_description = {
    entry['filename']: entry['description'] for entry in data_json
}
16
 
17
+
18
def llm_intro(selected_model):
    """Return the description text for *selected_model*, or a fallback note."""
    fallback = "No description available for this model selection."
    return model_description.get(selected_model, fallback)
20
 
21
# Cache of constructed GPT4All instances, keyed by model filename, so that
# switching back to a previously used model does not reload it.
model_cache = {}


def load_model_with_cache(model_name):
    """Return a GPT4All instance for *model_name*, reusing a cached one.

    On a cache miss the model is constructed once, stored in the global
    ``model_cache``, and that same instance is returned on later calls.
    """
    # Cached values are always GPT4All instances, never None, so .get()
    # returning None reliably signals a miss.
    cached = model_cache.get(model_name)
    if cached is None:
        cached = GPT4All(model_name)
        model_cache[model_name] = cached
    return cached
35
+
36
+
37
def generate_text(input_text, selected_model):
    """Generate up to 100 tokens of completion for *input_text*.

    The model object is obtained via ``load_model_with_cache`` so that
    repeated calls with the same selection reuse one loaded instance.
    """
    llm = load_model_with_cache(selected_model)
    return llm.generate(input_text, max_tokens=100)
45