reach-vb (HF staff) committed
Commit 08e5ef1
1 parent: 75b770e

Update app.py (#7)


- Update app.py (a1f7384981822e9020217a6a22e21e9e5a76d02f)

Files changed (1)
  1. app.py +37 -9
app.py CHANGED
@@ -1,3 +1,4 @@
+import os
 import shutil
 import subprocess
 
@@ -6,6 +7,9 @@ import gradio as gr
 from huggingface_hub import create_repo, HfApi
 from huggingface_hub import snapshot_download
 from huggingface_hub import whoami
+from huggingface_hub import ModelCard
+
+from textwrap import dedent
 
 api = HfApi()
 
@@ -29,26 +33,50 @@ def process_model(model_id, q_method, hf_token):
     print("Quantised successfully!")
 
     # Create empty repo
+    repo_id = f"{username}/{MODEL_NAME}-{q_method}-GGUF"
     repo_url = create_repo(
-        repo_id = f"{username}/{MODEL_NAME}-{q_method}-GGUF",
+        repo_id = repo_id,
         repo_type="model",
         exist_ok=True,
         token=hf_token
     )
     print("Empty repo created successfully!")
 
-    # Upload gguf files
-    # api.upload_folder(
-    #     folder_path=MODEL_NAME,
-    #     repo_id=f"{username}/{MODEL_NAME}-{q_method}-GGUF",
-    #     allow_patterns=["*.gguf"],
-    #     token=hf_token
-    # )
+
+    card = ModelCard.load(model_id)
+    card.data.tags = ["llama-cpp"] if card.data.tags is None else card.data.tags + ["llama-cpp"]
+    card.text = dedent(
+        f"""
+        # {repo_id}
+        This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp.
+        Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.
+        ## Use with llama.cpp
+
+        ```bash
+        brew install ggerganov/ggerganov/llama.cpp
+        ```
+
+        ```bash
+        llama-cli --hf-repo {repo_id} --model {qtype.split("/")[-1]} -p "The meaning to life and the universe is "
+        ```
+        """
+    )
+    card.save(os.path.join(MODEL_NAME, "README-new.md"))
+
     api.upload_file(
         path_or_fileobj=qtype,
-        repo_id=f"{username}/{MODEL_NAME}-{q_method}-GGUF",
+        path_in_repo=qtype.split("/")[-1],
+        repo_id=repo_id,
         repo_type="model",
     )
+
+    api.upload_file(
+        path_or_fileobj=f"{MODEL_NAME}/README-new.md",
+        path_in_repo="README.md",
+        repo_id=repo_id,
+        repo_type="model",
+    )
+
     print("Uploaded successfully!")
 
     shutil.rmtree(MODEL_NAME)
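
For context, here is a minimal standalone sketch of the model-card step this commit introduces, run outside the Space. The source model id, target repo id, and GGUF file name below are hypothetical placeholders; only the ModelCard / dedent / card.save pattern mirrors the code above.

    # Minimal sketch of the new model-card step (placeholders, not Space values).
    from textwrap import dedent

    from huggingface_hub import ModelCard

    model_id = "mistralai/Mistral-7B-Instruct-v0.2"           # hypothetical source model
    repo_id = "my-user/Mistral-7B-Instruct-v0.2-Q4_K_M-GGUF"  # hypothetical target repo
    gguf_file = "mistral-7b-instruct-v0.2.Q4_K_M.gguf"        # hypothetical quantised file

    # Load the original card so its metadata (license, language, ...) carries over,
    # then append the llama-cpp tag without clobbering any existing tags.
    card = ModelCard.load(model_id)
    card.data.tags = ["llama-cpp"] if card.data.tags is None else card.data.tags + ["llama-cpp"]

    # Replace the card body with a short GGUF usage note, as the commit does.
    card.text = dedent(
        f"""
        # {repo_id}
        This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp.
        Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.

        Use with llama.cpp:
        llama-cli --hf-repo {repo_id} --model {gguf_file} -p "The meaning to life and the universe is "
        """
    )

    # Write the card locally; in the Space this file is then uploaded to the new repo.
    card.save("README-new.md")

In the Space itself, the saved file is pushed with a second api.upload_file(..., path_in_repo="README.md", ...) call, so the generated card becomes the README of the quantised repo.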