ffreemt committed
Commit 0a62e2c
1 Parent(s): e565c65

Update predict_tr logger.debug

Files changed (1):
  1. app.py +27 -6
app.py CHANGED
@@ -17,14 +17,31 @@ from ctransformers import AutoModelForCausalLM
 from huggingface_hub import hf_hub_download
 from loguru import logger
 
-filename_list = ["Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin", "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin"]
+filename_list = [
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
+    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
+]
 
 URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"  # 4.05G
 MODEL_FILENAME = Path(URL).name
-MODEL_FILENAME = filename_list[0]  # q2_K
-MODEL_FILENAME = filename_list[5]  # q4_1
+MODEL_FILENAME = filename_list[0]  # q2_K 4.05G
+MODEL_FILENAME = filename_list[5]  # q4_1 4.21
 
-REPO_ID = "/".join(urlparse(URL).path.strip('/').split('/')[:2])  # TheBloke/Wizard-Vicuna-7B-Uncensored-GGML
+REPO_ID = "/".join(
+    urlparse(URL).path.strip("/").split("/")[:2]
+)  # TheBloke/Wizard-Vicuna-7B-Uncensored-GGML
 
 DESTINATION_FOLDER = "models"
 
@@ -71,7 +88,10 @@ def predict_str(prompt, bot):  # bot is in fact bot_history
     # bot.append([prompt, f"{response} {_}"])
     # return prompt, bot
 
-    return prompt, bot + [[prompt, None]]
+    _ = bot + [[prompt, None]]
+    logger.debug(f"{prompt=}, {_=}")
+
+    return prompt, _
 
 
 def bot_str(bot):
@@ -242,6 +262,7 @@ def generate(
     **asdict(generation_config),
 )
 
+
 # if "mpt" in model_filename:
 #     config = AutoConfig.from_pretrained("mosaicml/mpt-30b-chat", context_length=8192)
 #     llm = AutoModelForCausalLM.from_pretrained(
@@ -278,7 +299,7 @@ LLM = AutoModelForCausalLM.from_pretrained(
     # "TheBloke/WizardCoder-15B-1.0-GGML",
     REPO_ID,  # DESTINATION_FOLDER,  # model_path_or_repo_id: str required
     model_file=_,
-    model_type="llama",  # "starcoder", AutoConfig.from_pretrained(REPO_ID)
+    model_type="llama",  # "starcoder", AutoConfig.from_pretrained(REPO_ID)
     threads=cpu_count,
 )
 
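For reference, the REPO_ID derivation in the first hunk can be run on its own. A minimal sketch using only the standard library, with URL copied verbatim from app.py:

from pathlib import Path
from urllib.parse import urlparse

URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"

# urlparse(URL).path is "/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/...";
# dropping the leading "/" and keeping the first two path segments leaves
# the Hub repo id.
REPO_ID = "/".join(urlparse(URL).path.strip("/").split("/")[:2])
print(REPO_ID)         # TheBloke/Wizard-Vicuna-7B-Uncensored-GGML
print(Path(URL).name)  # Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin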
 
 
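The predict_str hunk replaces an inline return with a named intermediate so it can be logged. A standalone sketch of the resulting function, assuming bot is a Gradio-style chat history (a list of [user, reply] pairs); the demo call at the end is illustrative, not part of the app:

from loguru import logger

def predict_str(prompt, bot):  # bot is in fact bot_history
    _ = bot + [[prompt, None]]
    # f-string `=` specifier (Python 3.8+): logs the expression and its value,
    # e.g. prompt='hi', _=[['hi', None]]
    logger.debug(f"{prompt=}, {_=}")
    return prompt, _

print(predict_str("hi", []))  # ('hi', [['hi', None]])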
 
 
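The final hunk touches the ctransformers load call. Assembled here as a hedged, standalone sketch: MODEL_FILENAME stands in for the `_` variable the app passes as model_file, and cpu_count is taken from os rather than however the app computes it:

from os import cpu_count

from ctransformers import AutoModelForCausalLM

REPO_ID = "TheBloke/Wizard-Vicuna-7B-Uncensored-GGML"
MODEL_FILENAME = "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin"  # q4_1, as selected above

LLM = AutoModelForCausalLM.from_pretrained(
    REPO_ID,                    # model_path_or_repo_id: str required
    model_file=MODEL_FILENAME,  # which quantization to fetch from the repo
    model_type="llama",         # ggml Llama-family weights
    threads=cpu_count(),
)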