tharms committed
Commit fdb11fc · 1 Parent(s): 8857bb8

reverted to plain Fooocus

Files changed (7)
  1. Dockerfile +0 -1
  2. README.md +0 -10
  3. config.txt +0 -17
  4. config_modification_tutorial.txt +0 -131
  5. dummy_inference.py +0 -5
  6. inference.py +0 -65
  7. launch.py +0 -3
Dockerfile CHANGED
@@ -24,7 +24,6 @@ WORKDIR /content
  USER user
 
  RUN git clone https://github.com/lllyasviel/Fooocus /content/app
- COPY ../launch.py /content/app/launch.py
  RUN mv /content/app/models /content/app/models.org
 
  CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ]
 
README.md CHANGED
@@ -1,13 +1,3 @@
- ---
- title: Custom Ai
- emoji: 📈
- colorFrom: gray
- colorTo: purple
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  <div align=center>
  <img src="https://github.com/lllyasviel/Fooocus/assets/19834515/483fb86d-c9a2-4c20-997c-46dafc124f25">
 
 
config.txt DELETED
@@ -1,17 +0,0 @@
- {
-     "path_checkpoints": [
-         "/Users/tharms/workspace/gradio-gan/Fooocus/models/checkpoints"
-     ],
-     "path_loras": [
-         "/Users/tharms/workspace/gradio-gan/Fooocus/models/loras"
-     ],
-     "path_embeddings": "/Users/tharms/workspace/gradio-gan/Fooocus/models/embeddings",
-     "path_vae_approx": "/Users/tharms/workspace/gradio-gan/Fooocus/models/vae_approx",
-     "path_upscale_models": "/Users/tharms/workspace/gradio-gan/Fooocus/models/upscale_models",
-     "path_inpaint": "/Users/tharms/workspace/gradio-gan/Fooocus/models/inpaint",
-     "path_controlnet": "/Users/tharms/workspace/gradio-gan/Fooocus/models/controlnet",
-     "path_clip_vision": "/Users/tharms/workspace/gradio-gan/Fooocus/models/clip_vision",
-     "path_fooocus_expansion": "/Users/tharms/workspace/gradio-gan/Fooocus/models/prompt_expansion/fooocus_expansion",
-     "path_wildcards": "/Users/tharms/workspace/gradio-gan/Fooocus/wildcards",
-     "path_outputs": "/Users/tharms/workspace/gradio-gan/Fooocus/outputs"
- }
 
config_modification_tutorial.txt DELETED
@@ -1,131 +0,0 @@
- You can modify your "/Users/tharms/workspace/gradio-gan/generative-media-ai/config.txt" using the below keys, formats, and examples.
- Do not modify this file. Modifications in this file will not take effect.
- This file is a tutorial and example. Please edit "/Users/tharms/workspace/gradio-gan/generative-media-ai/config.txt" to really change any settings.
- Remember to split the paths with "\\" rather than "\", and there is no "," before the last "}".
-
-
- {
-     "path_checkpoints": [
-         "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/checkpoints"
-     ],
-     "path_loras": [
-         "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/loras"
-     ],
-     "path_embeddings": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/embeddings",
-     "path_vae_approx": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/vae_approx",
-     "path_upscale_models": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/upscale_models",
-     "path_inpaint": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/inpaint",
-     "path_controlnet": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/controlnet",
-     "path_clip_vision": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/clip_vision",
-     "path_fooocus_expansion": "/Users/tharms/workspace/gradio-gan/generative-media-ai/models/prompt_expansion/fooocus_expansion",
-     "path_wildcards": "/Users/tharms/workspace/gradio-gan/generative-media-ai/wildcards",
-     "path_outputs": "/Users/tharms/workspace/gradio-gan/Fooocus/outputs",
-     "temp_path": "/var/folders/29/5gn3wpkn70x7xyrzs2l59jdc0000gq/T/fooocus",
-     "temp_path_cleanup_on_launch": true,
-     "default_model": "juggernautXL_v8Rundiffusion.safetensors",
-     "previous_default_models": [
-         "juggernautXL_version8Rundiffusion.safetensors",
-         "juggernautXL_version7Rundiffusion.safetensors",
-         "juggernautXL_v7Rundiffusion.safetensors",
-         "juggernautXL_version6Rundiffusion.safetensors",
-         "juggernautXL_v6Rundiffusion.safetensors"
-     ],
-     "default_refiner": "None",
-     "default_refiner_switch": 0.5,
-     "default_loras_min_weight": -2,
-     "default_loras_max_weight": 2,
-     "default_loras": [
-         [
-             true,
-             "sd_xl_offset_example-lora_1.0.safetensors",
-             0.1
-         ],
-         [
-             true,
-             "None",
-             1.0
-         ],
-         [
-             true,
-             "None",
-             1.0
-         ],
-         [
-             true,
-             "None",
-             1.0
-         ],
-         [
-             true,
-             "None",
-             1.0
-         ]
-     ],
-     "default_max_lora_number": 5,
-     "default_cfg_scale": 4.0,
-     "default_sample_sharpness": 2.0,
-     "default_sampler": "dpmpp_2m_sde_gpu",
-     "default_scheduler": "karras",
-     "default_styles": [
-         "Fooocus V2",
-         "Fooocus Enhance",
-         "Fooocus Sharp"
-     ],
-     "default_prompt_negative": "",
-     "default_prompt": "",
-     "default_performance": "Speed",
-     "default_advanced_checkbox": false,
-     "default_max_image_number": 32,
-     "default_output_format": "png",
-     "default_image_number": 2,
-     "checkpoint_downloads": {
-         "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
-     },
-     "lora_downloads": {
-         "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
-     },
-     "embeddings_downloads": {},
-     "available_aspect_ratios": [
-         "704*1408",
-         "704*1344",
-         "768*1344",
-         "768*1280",
-         "832*1216",
-         "832*1152",
-         "896*1152",
-         "896*1088",
-         "960*1088",
-         "960*1024",
-         "1024*1024",
-         "1024*960",
-         "1088*960",
-         "1088*896",
-         "1152*896",
-         "1152*832",
-         "1216*832",
-         "1280*768",
-         "1344*768",
-         "1344*704",
-         "1408*704",
-         "1472*704",
-         "1536*640",
-         "1600*640",
-         "1664*576",
-         "1728*576"
-     ],
-     "default_aspect_ratio": "1152*896",
-     "default_inpaint_engine_version": "v2.6",
-     "default_cfg_tsnr": 7.0,
-     "default_overwrite_step": -1,
-     "default_overwrite_switch": -1,
-     "example_inpaint_prompts": [
-         "highly detailed face",
-         "detailed girl face",
-         "detailed man face",
-         "detailed hand",
-         "beautiful eyes"
-     ],
-     "default_save_metadata_to_images": false,
-     "default_metadata_scheme": "fooocus",
-     "metadata_created_by": ""
- }
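
The deleted tutorial boils down to: edit config.txt as strict JSON, escape Windows paths with "\\" rather than "\", and leave no "," before the closing "}". A minimal sketch of checking such a file before launch, using only the Python standard library (the config path below is an assumption, not something defined in this repo):

    import json
    from pathlib import Path

    CONFIG = Path("config.txt")  # assumed to sit next to launch.py

    # json.loads rejects exactly the mistakes the tutorial warns about:
    # trailing commas and unescaped backslashes in Windows paths.
    try:
        settings = json.loads(CONFIG.read_text())
    except json.JSONDecodeError as err:
        raise SystemExit(f"config.txt is not valid JSON: {err}")

    # Warn about any path_* entry that points to a missing directory.
    for key, value in settings.items():
        if not key.startswith("path_"):
            continue
        for p in value if isinstance(value, list) else [value]:
            if not Path(p).is_dir():
                print(f"{key}: directory not found: {p}")
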
 
dummy_inference.py DELETED
@@ -1,5 +0,0 @@
- def infer_stable_diffusion(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-     return "dummy_image"
-
- def infer_dall_e(text, model, quality, size):
-     return "dummy_image"
 
inference.py DELETED
@@ -1,65 +0,0 @@
- import numpy as np
- import random
- from diffusers import DiffusionPipeline
- import torch
- from openai import OpenAI
- from dotenv import load_dotenv
- import os
-
- load_dotenv()
- openai_key = os.getenv("OPENAI_KEY")
-
- if openai_key == "<YOUR_OPENAI_KEY>":
-     openai_key = ""
-
- if openai_key == "":
-     sys.exit("Please Provide Your OpenAI API Key")
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- if torch.cuda.is_available():
-     torch.cuda.max_memory_allocated(device=device)
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-     pipe.enable_xformers_memory_efficient_attention()
-     pipe = pipe.to(device)
- else:
-     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-     pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
-
- def infer_stable_diffusion(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt = prompt,
-         negative_prompt = negative_prompt,
-         guidance_scale = guidance_scale,
-         num_inference_steps = num_inference_steps,
-         width = width,
-         height = height,
-         generator = generator
-     ).images[0]
-
-     return image
-
- def infer_dall_e(text, model, quality, size):
-     try:
-         client = OpenAI(api_key=openai_key)
-
-         response = client.images.generate(
-             prompt=text,
-             model=model,
-             quality=quality,
-             size=size,
-             n=1,
-         )
-     except Exception as error:
-         print(str(error))
-         raise gr.Error("An error occurred while generating image.")
-
-     return response.data[0].url
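
Note that, as removed, inference.py called sys.exit() and gr.Error() without importing sys or gradio, so both the missing-key guard and the DALL-E error path would themselves raise NameError. A hedged sketch of the header the module would have needed, keeping the same OPENAI_KEY handling and adding only the two missing imports:

    import os
    import sys

    import gradio as gr  # gr.Error is raised when the DALL-E request fails
    from dotenv import load_dotenv

    # Read OPENAI_KEY from .env and stop early if it was never filled in.
    load_dotenv()
    openai_key = os.getenv("OPENAI_KEY", "")
    if openai_key in ("", "<YOUR_OPENAI_KEY>"):
        sys.exit("Please provide your OpenAI API key")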
 
launch.py CHANGED
@@ -2,7 +2,6 @@ import os
  import ssl
  import sys
 
- print('CUSTOMIZED LAUNCHER')
  print('[System ARGV] ' + str(sys.argv))
 
  root = os.path.dirname(os.path.abspath(__file__))
@@ -39,7 +38,6 @@ def prepare_environment():
      if REINSTALL_ALL or not is_installed("torch") or not is_installed("torchvision"):
          run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
 
-     print('XFORMERS TRY_INSTALL_XFORMERS=', {TRY_INSTALL_XFORMERS})
      if TRY_INSTALL_XFORMERS:
          if REINSTALL_ALL or not is_installed("xformers"):
              xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23')
@@ -55,7 +53,6 @@ def prepare_environment():
          elif platform.system() == "Linux":
              run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
 
-     print('REINSTALL_ALL=', {REINSTALL_ALL})
      if REINSTALL_ALL or not requirements_met(requirements_file):
          run_pip(f"install -r \"{requirements_file}\"", "requirements")
 
 