Yw22 committed
Commit 76cfa14
1 Parent(s): d165074

update template

app/src/base_model_template.py CHANGED
@@ -21,20 +21,20 @@ brushnet = BrushNetModel.from_pretrained(brushnet_path, torch_dtype=torch_dtype)
 
 
 base_models_list = [
-    {
-        "name": "dreamshaper_8 (Preload)",
-        "local_path": "models/base_model/dreamshaper_8",
-        "pipe": StableDiffusionBrushNetPipeline.from_pretrained(
-            "models/base_model/dreamshaper_8", brushnet=brushnet, torch_dtype=torch_dtype, low_cpu_mem_usage=False
-        ).to(device)
-    },
-    {
-        "name": "epicrealism (Preload)",
-        "local_path": "models/base_model/epicrealism_naturalSinRC1VAE",
-        "pipe": StableDiffusionBrushNetPipeline.from_pretrained(
-            "models/base_model/epicrealism_naturalSinRC1VAE", brushnet=brushnet, torch_dtype=torch_dtype, low_cpu_mem_usage=False
-        ).to(device)
-    },
+    # {
+    #     "name": "dreamshaper_8 (Preload)",
+    #     "local_path": "models/base_model/dreamshaper_8",
+    #     "pipe": StableDiffusionBrushNetPipeline.from_pretrained(
+    #         "models/base_model/dreamshaper_8", brushnet=brushnet, torch_dtype=torch_dtype, low_cpu_mem_usage=False
+    #     ).to(device)
+    # },
+    # {
+    #     "name": "epicrealism (Preload)",
+    #     "local_path": "models/base_model/epicrealism_naturalSinRC1VAE",
+    #     "pipe": StableDiffusionBrushNetPipeline.from_pretrained(
+    #         "models/base_model/epicrealism_naturalSinRC1VAE", brushnet=brushnet, torch_dtype=torch_dtype, low_cpu_mem_usage=False
+    #     ).to(device)
+    # },
     {
         "name": "henmixReal (Preload)",
         "local_path": "models/base_model/henmixReal_v5c",
app/src/vlm_template.py CHANGED
@@ -28,22 +28,22 @@ vlms_list = [
     #         "llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch_dtype, device_map=device
     #     ).to("cpu"),
     # },
-    {
-        "type": "llava-next",
-        "name": "llama3-llava-next-8b-hf (Preload)",
-        "local_path": "models/vlms/llama3-llava-next-8b-hf",
-        "processor": LlavaNextProcessor.from_pretrained(
-            "models/vlms/llama3-llava-next-8b-hf"
-        ) if os.path.exists("models/vlms/llama3-llava-next-8b-hf") else LlavaNextProcessor.from_pretrained(
-            "llava-hf/llama3-llava-next-8b-hf"
-        ),
-        "model": LlavaNextForConditionalGeneration.from_pretrained(
-            "models/vlms/llama3-llava-next-8b-hf", torch_dtype=torch_dtype, device_map=device
-        ).to("cpu") if os.path.exists("models/vlms/llama3-llava-next-8b-hf") else
-        LlavaNextForConditionalGeneration.from_pretrained(
-            "llava-hf/llama3-llava-next-8b-hf", torch_dtype=torch_dtype, device_map=device
-        ).to("cpu"),
-    },
+    # {
+    #     "type": "llava-next",
+    #     "name": "llama3-llava-next-8b-hf (Preload)",
+    #     "local_path": "models/vlms/llama3-llava-next-8b-hf",
+    #     "processor": LlavaNextProcessor.from_pretrained(
+    #         "models/vlms/llama3-llava-next-8b-hf"
+    #     ) if os.path.exists("models/vlms/llama3-llava-next-8b-hf") else LlavaNextProcessor.from_pretrained(
+    #         "llava-hf/llama3-llava-next-8b-hf"
+    #     ),
+    #     "model": LlavaNextForConditionalGeneration.from_pretrained(
+    #         "models/vlms/llama3-llava-next-8b-hf", torch_dtype=torch_dtype, device_map=device
+    #     ).to("cpu") if os.path.exists("models/vlms/llama3-llava-next-8b-hf") else
+    #     LlavaNextForConditionalGeneration.from_pretrained(
+    #         "llava-hf/llama3-llava-next-8b-hf", torch_dtype=torch_dtype, device_map=device
+    #     ).to("cpu"),
+    # },
     # {
     #     "type": "llava-next",
     #     "name": "llava-v1.6-vicuna-13b-hf",