seanpedrickcase committed on
Commit
5cdf399
1 Parent(s): f301d67

Removed reference to ctransformers

Files changed (2)
  1. app.py +0 -74
  2. chatfuncs/chatfuncs.py +0 -2
app.py CHANGED
@@ -9,11 +9,8 @@ import gradio as gr
 import pandas as pd
 
 from transformers import AutoTokenizer
-from ctransformers import AutoModelForCausalLM
-
 import torch
 
-import llama_cpp
 from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 
@@ -59,77 +56,6 @@ import chatfuncs.chatfuncs as chatf
 chatf.embeddings = load_embeddings(embeddings_name)
 chatf.vectorstore = get_faiss_store(faiss_vstore_folder="faiss_embedding",embeddings=globals()["embeddings"])
 
-# def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_device=None):
-#     print("Loading model")
-
-#     # Default values inside the function
-#     if gpu_config is None:
-#         gpu_config = chatf.gpu_config
-#     if cpu_config is None:
-#         cpu_config = chatf.cpu_config
-#     if torch_device is None:
-#         torch_device = chatf.torch_device
-
-#     if model_type == "Mistral Open Orca (larger, slow)":
-#         if torch_device == "cuda":
-#             gpu_config.update_gpu(gpu_layers)
-#         else:
-#             gpu_config.update_gpu(gpu_layers)
-#             cpu_config.update_gpu(gpu_layers)
-
-#         print("Loading with", cpu_config.gpu_layers, "model layers sent to GPU.")
-
-#         print(vars(gpu_config))
-#         print(vars(cpu_config))
-
-#         try:
-#             #model = AutoModelForCausalLM.from_pretrained('Aryanne/Orca-Mini-3B-gguf', model_type='llama', model_file='q5_0-orca-mini-3b.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu())
-#             #model = AutoModelForCausalLM.from_pretrained('Aryanne/Wizard-Orca-3B-gguf', model_type='llama', model_file='q4_1-wizard-orca-3b.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu())
-#             model = AutoModelForCausalLM.from_pretrained('TheBloke/Mistral-7B-OpenOrca-GGUF', model_type='mistral', model_file='mistral-7b-openorca.Q4_K_M.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu())
-#             #model = AutoModelForCausalLM.from_pretrained('TheBloke/MistralLite-7B-GGUF', model_type='mistral', model_file='mistrallite.Q4_K_M.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu())
-
-#         except:
-#             #model = AutoModelForCausalLM.from_pretrained('Aryanne/Orca-Mini-3B-gguf', model_type='llama', model_file='q5_0-orca-mini-3b.gguf', **vars(cpu_config)) #**asdict(CtransRunConfig_gpu())
-#             #model = AutoModelForCausalLM.from_pretrained('Aryanne/Wizard-Orca-3B-gguf', model_type='llama', model_file='q4_1-wizard-orca-3b.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu())
-#             model = AutoModelForCausalLM.from_pretrained('TheBloke/Mistral-7B-OpenOrca-GGUF', model_type='mistral', model_file='mistral-7b-openorca.Q4_K_M.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu())
-#             #model = AutoModelForCausalLM.from_pretrained('TheBloke/MistralLite-7B-GGUF', model_type='mistral', model_file='mistrallite.Q4_K_M.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu())
-
-#         tokenizer = []
-
-#     if model_type == "Flan Alpaca (small, fast)":
-#         # Huggingface chat model
-#         hf_checkpoint = 'declare-lab/flan-alpaca-large'#'declare-lab/flan-alpaca-base' # # #
-
-#         def create_hf_model(model_name):
-
-#             from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM
-
-#             if torch_device == "cuda":
-#                 if "flan" in model_name:
-#                     model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto")
-#                 else:
-#                     model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
-#             else:
-#                 if "flan" in model_name:
-#                     model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-#                 else:
-#                     model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
-
-#             tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length = chatf.context_length)
-
-#             return model, tokenizer, model_type
-
-#         model, tokenizer, model_type = create_hf_model(model_name = hf_checkpoint)
-
-#     chatf.model = model
-#     chatf.tokenizer = tokenizer
-#     chatf.model_type = model_type
-
-#     load_confirmation = "Finished loading model: " + model_type
-
-#     print(load_confirmation)
-#     return model_type, load_confirmation, model_type
-
 
 def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_device=None):
     print("Loading model")
 
chatfuncs/chatfuncs.py CHANGED
@@ -38,8 +38,6 @@ from gensim.corpora import Dictionary
 from gensim.models import TfidfModel, OkapiBM25Model
 from gensim.similarities import SparseMatrixSimilarity
 
-import copy
-import llama_cpp
 from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 
 