add big files.
- .gitattributes +3 -0
- .gitignore +1 -0
- README.md +6 -0
- main.py +9 -12
- requirements.txt +4 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoints/* filter=lfs diff=lfs merge=lfs -text
+dats/* filter=lfs diff=lfs merge=lfs -text
+checkpoints/* filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -5,3 +5,4 @@
 /.idea
 /.vs
 /.vs/torchcsharp/CopilotIndices/17.12.38.29086/CodeChunks.db
+/__pycache__
README.md
CHANGED
@@ -12,3 +12,9 @@
 
 ### Instructions
 
+
+```
+conda install pytorch torchvision torchaudio pytorch-cuda=12.4 -c pytorch -c nvidia
+pip install protobuf 'transformers>=4.41.2' cpm_kernels 'torch>=2.0' gradio mdtex2html sentencepiece accelerate
+pip install leb128
+```
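As a quick sanity check after running the install commands above, the following sketch (an illustrative addition, not part of this commit) confirms that the CUDA-enabled PyTorch build from the conda command is usable; it only relies on the `torch` and `torchvision` packages installed by those instructions.

```
# Hypothetical post-install check: verify PyTorch and torchvision import
# and that the CUDA runtime installed via pytorch-cuda=12.4 is visible.
import torch
import torchvision

print("torch:", torch.__version__)
print("torchvision:", torchvision.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))
```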
main.py
CHANGED
@@ -1,8 +1,5 @@
 import os
 
-from PIL import Image
-from sympy import false
-from torchvision import transforms
 import torch
 from torchvision import models
 
@@ -36,18 +33,18 @@ torch.set_default_device(cuda_device)
 
 
 def export_model(model: torch.nn.Module, model_name: str):
-    cpu_filepath = os.path.join(dats_path, f"{model_name}
+    cpu_filepath = os.path.join(dats_path, f"{model_name}.dat")
     if os.path.exists(cpu_filepath):
         os.remove(cpu_filepath)
     with open(cpu_filepath, "wb") as f_cpu:
-
-
-    if is_support_cuda:
-
-
-
-
-
+        exportsd.save_state_dict(model.to(cpu_device).state_dict(), f_cpu)
+
+    # if is_support_cuda:
+    #     cuda_filepath = os.path.join(dats_path, f"{model_name}.dat")
+    #     if os.path.exists(cuda_filepath):
+    #         os.remove(cuda_filepath)
+    #     with open(cuda_filepath, "wb") as f_cuda:
+    #         exportsd.save_state_dict(model.to(cuda_device).state_dict(), f_cuda)
 
 
 model_list = []
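For context on what this change produces, here is a minimal, self-contained usage sketch (illustrative, not part of the commit). It assumes TorchSharp's `exportsd.py` helper is importable and mirrors the `dats_path`/`cpu_device` setup from `main.py`; it writes a torchvision model's weights to a `.dat` file via `exportsd.save_state_dict`, the same call used in the diff above.

```
import os
import torch
from torchvision import models
import exportsd  # TorchSharp's export helper script (assumed to be on the path)

# Assumed setup, mirroring main.py: export directory and CPU device.
cpu_device = torch.device("cpu")
dats_path = "dats"
os.makedirs(dats_path, exist_ok=True)

def export_model(model: torch.nn.Module, model_name: str):
    # Save the state dict in the TorchSharp-readable .dat format, as in main.py.
    cpu_filepath = os.path.join(dats_path, f"{model_name}.dat")
    if os.path.exists(cpu_filepath):
        os.remove(cpu_filepath)
    with open(cpu_filepath, "wb") as f_cpu:
        exportsd.save_state_dict(model.to(cpu_device).state_dict(), f_cpu)

# Example: export an untrained ResNet-18 (hypothetical model choice).
export_model(models.resnet18(weights=None), "resnet18")
```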
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+leb128==1.0.8
+numpy==2.2.2
+torch==2.5.1
+torchvision==0.20.1