reach-vb (HF staff) committed
Commit 1504cda
1 Parent(s): 3ad22ce

Remove references to convert.py (#73)


- Remove references to convert.py (cb0a2b8372fbdccbfc50f8001503a36898f34738)

Files changed (1)
  1. app.py +1 -12
app.py CHANGED
@@ -15,19 +15,8 @@ from apscheduler.schedulers.background import BackgroundScheduler
 
 from textwrap import dedent
 
-LLAMA_LIKE_ARCHS = ["MistralForCausalLM",]
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
-def script_to_use(model_id, api):
-    info = api.model_info(model_id)
-    if info.config is None:
-        return None
-    arch = info.config.get("architectures", None)
-    if arch is None:
-        return None
-    arch = arch[0]
-    return "convert.py" if arch in LLAMA_LIKE_ARCHS else "convert-hf-to-gguf.py"
-
 def split_upload_model(model_path, repo_id, oauth_token: gr.OAuthToken | None, split_max_tensors=256, split_max_size=None):
     if oauth_token.token is None:
         raise ValueError("You have to be logged in.")
@@ -98,7 +87,7 @@ def process_model(model_id, q_method, private_repo, split_model, split_max_tenso
     print(f"Current working directory: {os.getcwd()}")
     print(f"Model directory contents: {os.listdir(model_name)}")
 
-    conversion_script = script_to_use(model_id, api)
+    conversion_script = "convert-hf-to-gguf.py"
     fp16_conversion = f"python llama.cpp/{conversion_script} {model_name} --outtype f16 --outfile {fp16}"
     result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
     print(result)
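
With this commit the Space no longer dispatches on model architecture: every checkpoint goes through llama.cpp's convert-hf-to-gguf.py. Below is a minimal, self-contained sketch of the resulting conversion step; model_name and fp16 are hypothetical placeholders (in app.py they come from the Gradio inputs and the download step), and the argument-list form of subprocess.run is an illustrative choice rather than the app's exact shell=True invocation.

import subprocess

# Hypothetical inputs for illustration only.
model_name = "downloaded-model"        # local directory holding the HF checkpoint
fp16 = f"{model_name}/model-f16.gguf"  # where the converted GGUF should land

# Architecture dispatch is gone: the HF-to-GGUF converter is used unconditionally.
conversion_script = "convert-hf-to-gguf.py"
cmd = [
    "python", f"llama.cpp/{conversion_script}",
    model_name, "--outtype", "f16", "--outfile", fp16,
]
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stdout)
if result.returncode != 0:
    raise RuntimeError(f"fp16 conversion failed: {result.stderr}")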