Spaces: Running on Zero

Kunpeng Song committed
Commit • ec3730d
1 Parent(s): aa13efa

fix zero
Browse files

- .DS_Store +0 -0
- app.py +3 -4
- dataset_lib/dataset_eval_MoMA.py +1 -1
- model_lib/attention_processor.py +0 -2
- model_lib/moMA_generator.py +0 -3
- model_lib/modules.py +0 -1
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
app.py
CHANGED
@@ -1,12 +1,10 @@
 import spaces
+
 import gradio as gr
-import cv2
 import torch
 import numpy as np
-from torchvision import transforms
 import torch
 from pytorch_lightning import seed_everything
-from torchvision.utils import save_image
 from model_lib.modules import MoMA_main_modal
 from model_lib.utils import parse_args
 import os
@@ -14,11 +12,12 @@ os.environ["CUDA_VISIBLE_DEVICES"]="0"
 
 title = "MoMA"
 description = "This model has to run on GPU. By default, we load the model with 4-bit quantization to make it fit in smaller hardware."
+device = torch.device('cuda')
 
 seed_everything(0)
 args = parse_args()
 #load MoMA from HuggingFace. Auto download
-model = MoMA_main_modal(args).to(
+model = MoMA_main_modal(args).to(device, dtype=torch.float16)
 
 def MoMA_demo(rgb, subject, prompt, strength, seed):
     with torch.no_grad():
dataset_lib/dataset_eval_MoMA.py
CHANGED
@@ -2,7 +2,7 @@ from PIL import Image
 import numpy as np
 import torch
 from torchvision import transforms
-from llava.mm_utils import process_images
+from llava.mm_utils import process_images
 from rembg import remove
 
 def create_binary_mask(image):
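`create_binary_mask` together with the `rembg` import suggests the eval loader derives subject masks by background removal. A hedged sketch of that idea, not the file's actual implementation (the threshold value is an assumption):

import numpy as np
from PIL import Image
from rembg import remove

def create_binary_mask(image: Image.Image) -> np.ndarray:
    # rembg returns an RGBA image whose alpha channel separates the
    # subject from the background; thresholding it gives a 0/1 mask.
    rgba = remove(image)
    alpha = np.array(rgba)[:, :, 3]
    return (alpha > 127).astype(np.uint8)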
model_lib/attention_processor.py
CHANGED
@@ -4,8 +4,6 @@ import torch.nn as nn
 import torch.nn.functional as F
 from einops import rearrange
 import math
-from torchvision.utils import save_image
-import torchvision.transforms as T
 
 def get_mask_from_cross(attn_processors):
     reference_masks = []
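The two torchvision imports removed here are the usual debug helpers for dumping intermediate attention masks to disk; nothing in the shipped code path needs them. For reference, the kind of one-off inspection they enable (the tensor below is a stand-in, not MoMA data):

import torch
from torchvision.utils import save_image

mask = torch.rand(4, 1, 64, 64)  # stand-in for a batch of cross-attention masks
save_image(mask, "debug_mask.png", normalize=True)  # writes a normalized image grid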
model_lib/moMA_generator.py
CHANGED
@@ -1,7 +1,4 @@
-from typing import List
 import torch
-from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
-from PIL import Image
 from model_lib.attention_processor import IPAttnProcessor, IPAttnProcessor_Self, get_mask_from_cross
 from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
 import tqdm
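The surviving imports show the generator couples a diffusers `StableDiffusionPipeline` (with `DDIMScheduler`) to the custom `IPAttnProcessor` classes. Custom processors are installed through diffusers' standard extension point, `unet.set_attn_processor`; a minimal sketch, assuming `attn1`/`attn2` name self- vs. cross-attention as in stock Stable Diffusion (the processor choice below uses the diffusers default as a placeholder for MoMA's classes):

import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler
from diffusers.models.attention_processor import AttnProcessor

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# Map every attention layer name to a processor instance; MoMA would
# presumably put IPAttnProcessor on cross-attention ("attn2") layers
# and IPAttnProcessor_Self on self-attention ("attn1") layers.
procs = {name: AttnProcessor() for name in pipe.unet.attn_processors}
pipe.unet.set_attn_processor(procs)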
model_lib/modules.py
CHANGED
@@ -1,5 +1,4 @@
 import os
-from PIL import Image
 import torch
 import torch.nn as nn
 from typing import List, Optional
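Outside of app.py, this commit only strips unused imports; the functional fix is the explicit CUDA/fp16 placement shown above. The demo description also mentions loading with 4-bit quantization, which is not visible in this diff; with transformers + bitsandbytes that request typically looks like the following sketch (the model id is a placeholder, not MoMA's actual checkpoint or loader):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4 bits on load
    bnb_4bit_quant_type="nf4",             # NormalFloat4 quantization
    bnb_4bit_compute_dtype=torch.float16,  # matmuls still run in fp16
)
llm = AutoModelForCausalLM.from_pretrained(
    "some-org/some-causal-lm",             # placeholder model id
    quantization_config=bnb,
    device_map="auto",
)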