Spaces: Running on Zero
Upload 4 files
- genimage.py +6 -0
- llmdolphin.py +10 -0
genimage.py
CHANGED
@@ -1,5 +1,6 @@
 import spaces
 import torch
+import gc
 
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -90,6 +91,7 @@ def generate_image(prompt, neg_prompt):
             output_type="pil",
             clip_skip=2,
         ).images
+        pipe.to("cpu")
         if images:
             image_paths = [
                 save_image(image, metadata, "./outputs")
@@ -98,5 +100,9 @@ def generate_image(prompt, neg_prompt):
         return image_paths
     except Exception as e:
         print(e)
+        pipe.to("cpu")
         return []
+    finally:
+        torch.cuda.empty_cache()
+        gc.collect()
 
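Every hunk in genimage.py serves one goal: hand the GPU back as soon as generate_image finishes. The pipeline is moved to the CPU on both the success and the error path, and a new finally block empties the CUDA cache and runs the garbage collector no matter how the function exits. A minimal, self-contained sketch of that pattern, assuming a callable diffusers-style pipeline; the wrapper name and arguments below are illustrative, not part of the commit:

import gc
import torch

def run_pipe_and_release(pipe, *args, **kwargs):
    # Hypothetical wrapper showing the cleanup pattern this commit adds to
    # generate_image; only the try/except/finally shape mirrors the diff.
    try:
        images = pipe(*args, **kwargs).images
        pipe.to("cpu")            # move weights off the GPU once inference is done
        return images
    except Exception as e:
        print(e)
        pipe.to("cpu")            # move them off on the failure path too
        return []
    finally:
        torch.cuda.empty_cache()  # return cached CUDA blocks to the driver
        gc.collect()              # collect lingering Python references to freed tensors

On a ZeroGPU Space the device is time-shared across requests, so releasing memory promptly between calls is what keeps later invocations from hitting out-of-memory errors.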
llmdolphin.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
 from pathlib import Path
 import re
 import torch
+import gc
 from typing import Any
 from huggingface_hub import hf_hub_download, HfApi
 from llama_cpp import Llama
@@ -298,6 +299,9 @@ def dolphin_respond(
         print(e)
         raise gr.Error(f"Error: {e}")
         #yield [("", None)]
+    finally:
+        torch.cuda.empty_cache()
+        gc.collect()
 
 
 def dolphin_parse(
@@ -402,6 +406,9 @@ def dolphin_respond_auto(
     except Exception as e:
         print(e)
         yield [("", None)], gr.update(), gr.update()
+    finally:
+        torch.cuda.empty_cache()
+        gc.collect()
 
 
 def dolphin_parse_simple(
@@ -504,3 +511,6 @@ def respond_playground(
         print(e)
         raise gr.Error(f"Error: {e}")
         #yield ""
+    finally:
+        torch.cuda.empty_cache()
+        gc.collect()
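The three llmdolphin.py hunks attach the same finally cleanup to Gradio callbacks, two of which are generators. A finally clause in a generator body runs when the generator is exhausted or closed, so the cache gets cleared after every streamed chat turn, including aborted ones. A minimal sketch under that assumption; the function name and the llama_cpp call below are illustrative, and only the finally block mirrors the diff:

import gc
import torch
import gradio as gr
from llama_cpp import Llama

def stream_with_cleanup(llm: Llama, prompt: str):
    # Hypothetical streaming handler; calling a Llama instance with
    # stream=True yields OpenAI-style completion chunks.
    try:
        for chunk in llm(prompt, stream=True):
            yield chunk["choices"][0]["text"]
    except Exception as e:
        print(e)
        raise gr.Error(f"Error: {e}")
    finally:
        # Runs on normal exhaustion, on the re-raised error, and when Gradio
        # closes the generator early (e.g. the user presses Stop).
        torch.cuda.empty_cache()
        gc.collect()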