# populate all models available from GPT4All
# NOTE(review): assumes `urlopen` (urllib.request), `json`, `re`, and `GPT4All`
# (gpt4all package) are imported earlier in the file — confirm.
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)  # fetched once at import time; raises URLError when offline
data_json = json.loads(response.read())


def model_choices():
    """Return the list of model filenames advertised by the metadata feed."""
    # Iterate the parsed JSON directly instead of indexing via range(len(...)).
    return [model['filename'] for model in data_json]


# get each models' description
model_description = {model['filename']: model['description'] for model in data_json}


def llm_intro(selected_model):
    """Return a plain-text description for *selected_model*.

    Descriptions in models3.json are HTML snippets; strip or convert the
    markup so the text reads cleanly in a plain-text widget. Falls back to
    a fixed message when the model name is unknown.
    """
    html_string = model_description.get(selected_model, "No description available for this model selection.")
    # Fix: the tag literals below had been reduced to empty strings (an HTML
    # extraction artifact), turning every replace() into a no-op. Restored
    # them, and also strip </ul>/</li>, which the chain previously left in
    # the output (format_html_string below removes those end tags too).
    formatted_description = (
        html_string
        .replace("<strong>", "")
        .replace("</strong>", "")
        .replace("<br>", "\n")
        .replace("<ul>", "")
        .replace("</ul>", "")
        .replace("<li>", "\n➤ ")
        .replace("</li>", "")
    )
    return formatted_description


def remove_endtags(html_string, tags):
    """Remove rear HTML tags (e.g. ``</ul>``) from the input string.

    *tags* is an iterable of bare tag names without angle brackets.
    """
    for tag in tags:
        # Fix: the pattern had collapsed to an empty f-string (identity sub);
        # rebuild the closing-tag regex from the bare tag name.
        html_string = re.sub(fr"</{tag}>", "", html_string)
    return html_string


def replace_starttags(html_string, replacements):
    """Replace starting HTML tags with the corresponding values.

    *replacements* maps literal tag text to its plain-text substitute.
    """
    for tag, replacement in replacements.items():
        html_string = html_string.replace(tag, replacement)
    return html_string


def format_html_string(html_string):
    """Format the HTML string to a readable text format."""
    tags_to_remove = ["ul", "li", "br"]
    html_string = remove_endtags(html_string, tags_to_remove)
    # Fix: these dict keys had lost their HTML-tag text to the same
    # extraction artifact; restored from the rendered remnants
    # (<li> left its bullet, <br> left a line break, <strong> left nothing).
    tag_replacements = {
        "<li>": "\n➤ ",
        "<br>": "\n",
        "<strong>": "**",
        "</strong>": "**",
    }
    formatted_string = replace_starttags(html_string, tag_replacements)
    return formatted_string


# cache models for faster reloads
model_cache = {}


def load_model(model_name):
    """
    Check the cache before loading a model.

    If the model is cached, return the cached instance. Otherwise
    construct it (GPT4All may download weights on first use), cache it,
    and return it.
    """
    if model_name not in model_cache:
        model_cache[model_name] = GPT4All(model_name)
    return model_cache[model_name]