OmParkashPandeY committed
Commit f450f63
1 Parent(s): 4937f95

Upload 6 files

.gitignore ADDED
@@ -0,0 +1,2 @@
+ .venv/
+ .env
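
For context, app.py below loads the access token from a local .env file with python-dotenv and reads it as HF_API_KEY, so the ignored .env file would hold a single line of roughly this form (the token value here is a placeholder, not a real key):

HF_API_KEY=hf_xxxxxxxxxxxxxxxxxxxx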
app.py ADDED
@@ -0,0 +1,87 @@
+ import os
+ import io
+ from PIL import Image
+ from dotenv import load_dotenv, find_dotenv
+ _ = load_dotenv(find_dotenv())  # read local .env file
+ hf_api_key = os.environ['HF_API_KEY']
+
+ # Helper function
+ import requests, json
+
+ # API_URL = "https://api-inference.huggingface.co/models/sayakpaul/text-to-image-pokemons-gpt4"
+ # API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
+ # API_URL = "https://api-inference.huggingface.co/models/cloudqi/cqi_text_to_image_pt_v0"
+ # API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
+ # API_URL = "https://api-inference.huggingface.co/models/SimianLuo/LCM_Dreamshaper_v7"
+ # API_URL = "https://api-inference.huggingface.co/models/tensor-diffusion/majicMIX-realistic-v7"
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/majicMIX_realistic_v1"
+
+
+
+ # Text-to-image endpoint
+ def get_completion(inputs, parameters=None, ENDPOINT_URL=API_URL):
+     headers = {
+         "Authorization": f"Bearer {hf_api_key}",
+         "Content-Type": "application/json"
+     }
+     data = { "inputs": inputs }
+     if parameters is not None:
+         data.update({"parameters": parameters})
+     response = requests.request("POST", ENDPOINT_URL, headers=headers, data=json.dumps(data))
+     return response.content
+
+ import gradio as gr
+ # Simple version: prompt only (superseded by the full generate() below)
+ def generate(prompt):
+     output = get_completion(prompt)
+     result_image = Image.open(io.BytesIO(output))
+     return result_image
+
+
+ # Full version with a negative prompt and advanced options
+ def generate(prompt, negative_prompt, steps, guidance, width, height):
+     params = {
+         "negative_prompt": negative_prompt,
+         "num_inference_steps": steps,
+         "guidance_scale": guidance,
+         "width": width,
+         "height": height
+     }
+
+     output = get_completion(prompt, params)
+     pil_image = Image.open(io.BytesIO(output))
+     return pil_image
+
+ def loadGUI():
+     with gr.Blocks() as demo:
+         gr.Markdown("# Image Generation with Stable Diffusion - Magic Mix V1")
+         with gr.Row():
+             with gr.Column(scale=4):
+                 prompt = gr.Textbox(label="Your prompt")  # Give the prompt some real estate
+             with gr.Column(scale=1, min_width=50):
+                 btn = gr.Button("Submit")  # Submit button side by side
+         with gr.Accordion("Advanced options", open=False):  # Hide the advanced options by default
+             negative_prompt = gr.Textbox(label="Negative prompt")
+             with gr.Row():
+                 with gr.Column():
+                     steps = gr.Slider(label="Inference Steps", minimum=1, maximum=100, step=1, value=25,
+                                       info="In how many steps will the denoiser denoise the image?")
+                     guidance = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=1, value=7,
+                                          info="Controls how much the text prompt influences the result")
+                 with gr.Column():
+                     width = gr.Slider(label="Width", minimum=64, maximum=1024, step=32, value=512)
+                     height = gr.Slider(label="Height", minimum=64, maximum=1024, step=32, value=512)
+         output = gr.Image(label="Result")  # Move the output up too
+
+         btn.click(fn=generate, inputs=[prompt, negative_prompt, steps, guidance, width, height], outputs=[output])
+
+     gr.close_all()
+     demo.launch(share=True)
+
+ def main():
+     loadGUI()
+
+
+ if __name__ == "__main__":
+     main()
+
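
As a quick sanity check of the Inference API call without launching the UI, a minimal sketch along these lines should work. It reuses get_completion from app.py; the prompt text and output filename are arbitrary examples, and the endpoint may return a JSON error body (e.g. while the model is loading) rather than image bytes, in which case the saved file will not be a valid image.

# Hypothetical smoke test, not part of this commit; assumes it runs next to app.py with the .env in place.
from app import get_completion

params = {
    "negative_prompt": "blurry, low quality",  # example values only
    "num_inference_steps": 25,
    "guidance_scale": 7,
    "width": 512,
    "height": 512,
}

image_bytes = get_completion("a police helicopter over a city at dusk", params)
with open("test_output.png", "wb") as f:
    f.write(image_bytes)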
images/helicopter.jpg ADDED
images/maxresdefault.jpg ADDED
images/police-heli.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,75 @@
+ accelerate==0.25.0
+ aiofiles==23.2.1
+ altair==5.2.0
+ annotated-types==0.6.0
+ anyio==3.7.1
+ attrs==23.1.0
+ certifi==2023.11.17
+ charset-normalizer==3.3.2
+ click==8.1.7
+ colorama==0.4.6
+ contourpy==1.2.0
+ cycler==0.12.1
+ fastapi==0.104.1
+ ffmpy==0.3.1
+ filelock==3.13.1
+ fonttools==4.46.0
+ fsspec==2023.12.1
+ gradio==4.8.0
+ gradio_client==0.7.1
+ h11==0.14.0
+ httpcore==1.0.2
+ httpx==0.25.2
+ huggingface-hub==0.19.4
+ idna==3.6
+ importlib-resources==6.1.1
+ Jinja2==3.1.2
+ jsonschema==4.20.0
+ jsonschema-specifications==2023.11.2
+ kiwisolver==1.4.5
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ matplotlib==3.8.2
+ mdurl==0.1.2
+ mpmath==1.3.0
+ networkx==3.2.1
+ numpy==1.26.2
+ orjson==3.9.10
+ packaging==23.2
+ pandas==2.1.4
+ Pillow==10.1.0
+ psutil==5.9.6
+ pydantic==2.5.2
+ pydantic_core==2.14.5
+ pydub==0.25.1
+ Pygments==2.17.2
+ pyparsing==3.1.1
+ python-dateutil==2.8.2
+ python-dotenv==1.0.0
+ python-multipart==0.0.6
+ pytz==2023.3.post1
+ PyYAML==6.0.1
+ referencing==0.32.0
+ regex==2023.10.3
+ requests==2.31.0
+ rich==13.7.0
+ rpds-py==0.13.2
+ safetensors==0.4.1
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.27.0
+ sympy==1.12
+ tokenizers==0.15.0
+ tomlkit==0.12.0
+ toolz==0.12.0
+ torch==2.1.1
+ tqdm==4.66.1
+ transformers==4.35.2
+ typer==0.9.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ urllib3==2.1.0
+ uvicorn==0.24.0.post1
+ websockets==11.0.3
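
To run this locally, the usual flow (assuming a standard Python setup; exact commands vary by platform) would be: create and activate a virtual environment in .venv/ (matching the .gitignore entry), install the pinned dependencies with pip install -r requirements.txt, add the .env file described above, and start the app with python app.py. Because loadGUI() calls demo.launch(share=True), Gradio serves the UI locally and also prints a temporary public share link.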