ifire committed on
Commit ee576e9
1 Parent(s): 8872a28

Load the models.

.gitignore CHANGED
@@ -4,3 +4,5 @@ render.obj
 .cog/
 
 __pycache__/
+
+output.obj
predict.py CHANGED
@@ -3,13 +3,10 @@ import tempfile
 import time
 from meshgpt_pytorch import MeshTransformer, mesh_render
 
-
 class Predictor(BasePredictor):
     def setup(self):
         """Load the model into memory to make running multiple predictions efficient"""
-        self.transformer = MeshTransformer.from_pretrained(
-            "MarcusLoren/MeshGPT-preview"
-        )
+        self.transformer = MeshTransformer.from_pretrained("./transformer_directory")
 
     def predict(
         self,
@@ -54,3 +51,10 @@ class Predictor(BasePredictor):
         file_path = Path(tempfile.mkdtemp()) / file_name
         mesh_render.save_rendering(str(file_path), output)
         return file_path
+
+
+if __name__ == "__main__":
+    model_name = "MarcusLoren/MeshGPT-preview"
+
+    model = MeshTransformer.from_pretrained("MarcusLoren/MeshGPT-preview")
+    model.save_pretrained("./transformer_directory")
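In effect, this commit splits model acquisition into two phases: a one-time download (the new `__main__` block, run outside prediction) and an offline load in `setup()` from the saved `./transformer_directory`. A minimal sketch of the same round-trip, using only the `from_pretrained`/`save_pretrained` calls that appear in the diff (note the commit assigns `model_name` but then repeats the string literal in the call):

```python
from meshgpt_pytorch import MeshTransformer

# One-time, network-bound step: fetch the checkpoint from the Hub and
# serialize config.json plus model.safetensors into a local directory.
model = MeshTransformer.from_pretrained("MarcusLoren/MeshGPT-preview")
model.save_pretrained("./transformer_directory")

# From here on, loading is fully local; this is what setup() now does,
# so predictions no longer depend on network access to the Hub.
transformer = MeshTransformer.from_pretrained("./transformer_directory")
```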
transformer_directory/README.md ADDED
@@ -0,0 +1,9 @@
+---
+tags:
+- pytorch_model_hub_mixin
+- model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Library: [More Information Needed]
+- Docs: [More Information Needed]
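This README is the stock model card that `PyTorchModelHubMixin` generates on push; the `[More Information Needed]` placeholders are part of that template and are left unfilled here. For reference, a hedged sketch of the mixin call that produces a card like this (the repo id below is a placeholder, not from this commit):

```python
from meshgpt_pytorch import MeshTransformer

# Hypothetical push-back example; requires `huggingface-cli login` first.
# push_to_hub() is provided by huggingface_hub's PyTorchModelHubMixin,
# which MeshTransformer appears to inherit (see the tags in the README).
model = MeshTransformer.from_pretrained("./transformer_directory")
model.push_to_hub("your-username/meshgpt-copy")  # placeholder repo id
```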
transformer_directory/config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "attn_depth": 12,
+  "attn_dim_head": 64,
+  "attn_heads": 12,
+  "attn_kwargs": {
+    "attn_num_mem_kv": 4,
+    "ff_glu": true
+  },
+  "coarse_pre_gateloop_depth": 2,
+  "condition_on_text": true,
+  "cross_attn_num_mem_kv": 4,
+  "dim": 768,
+  "dropout": 0.0,
+  "fine_attn_depth": 2,
+  "fine_attn_dim_head": 32,
+  "fine_attn_heads": 8,
+  "fine_cross_attend_text": true,
+  "fine_pre_gateloop_depth": 2,
+  "flash_attn": true,
+  "gateloop_use_heinsen": false,
+  "max_seq_len": 1500,
+  "num_sos_tokens": 1,
+  "pad_id": -1,
+  "quads": false,
+  "text_cond_with_film": false,
+  "text_condition_cond_drop_prob": 0.0,
+  "text_condition_model_types": "bge"
+}
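The config records the model's constructor arguments (a 768-dim, 12-layer, 12-head transformer with BGE text conditioning and a 1500-token sequence cap), which is how the mixin reconstructs the architecture before loading weights. A sketch, assuming the keys map one-to-one onto `MeshTransformer.__init__` kwargs as the mixin's serialization implies; this rebuilds the architecture only, with freshly initialized weights:

```python
import json
from meshgpt_pytorch import MeshTransformer

# The mixin stores __init__ kwargs in config.json; rebuilding from it
# yields the same architecture but untrained weights. Use
# from_pretrained("./transformer_directory") to also load the checkpoint.
with open("transformer_directory/config.json") as f:
    config = json.load(f)

model = MeshTransformer(**config)
```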
transformer_directory/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2782fe5aab4625a1b595b311ceac27cd6198370f935998a8707dd81d6706fc12
+size 926663752
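This file is a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real checkpoint and `size` says it is 926,663,752 bytes (roughly 0.9 GB), fetched on `git lfs pull`. Once the actual file is present, a quick sanity check is possible with the `safetensors` library (a sketch; the tensor names are whatever the checkpoint contains):

```python
from safetensors import safe_open

# Inspect the checkpoint's contents without loading it fully into memory.
path = "transformer_directory/model.safetensors"
with safe_open(path, framework="pt", device="cpu") as f:
    names = list(f.keys())
    print(f"{len(names)} tensors, e.g. {names[:3]}")
```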