nekofura committed
Commit a40c12e
1 Parent(s): e39383a

Upload 2 files

Files changed (2)
  1. image_generator (3).py +21 -0
  2. inference (2).py +86 -0
image_generator (3).py ADDED
@@ -0,0 +1,21 @@
+ from diffusers import DiffusionPipeline, LCMScheduler
+ import torch
+
+ # Cache the loaded pipeline so repeated calls reuse the same model.
+ loaded_pipe = None
+ loaded_pipe_id = None
+
+ def load_model(pipe_id):
+     global loaded_pipe, loaded_pipe_id
+     if loaded_pipe_id != pipe_id:
+         loaded_pipe = DiffusionPipeline.from_pretrained(pipe_id, torch_dtype=torch.float16).to("cuda")
+         # Swap in the LCM scheduler and LoRA weights for few-step inference.
+         loaded_pipe.scheduler = LCMScheduler.from_config(loaded_pipe.scheduler.config)
+         loaded_pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
+         loaded_pipe_id = pipe_id
+     return loaded_pipe
+
+ def generate_image(prompt, num_inference_steps, seed, guidance_scale, negative_prompt=None, pipe_id="Linaqruf/animagine-xl"):
+     pipe = load_model(pipe_id)
+     # Seed the generator so identical inputs reproduce the same image.
+     generator = torch.manual_seed(seed)
+     image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale).images[0]
+     return image
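
For context, a minimal way to exercise this module might look as follows (a sketch, assuming the file is saved as image_generator.py on a CUDA machine with diffusers, torch, and peft installed; the prompt, step count, and output filename are illustrative, kept at few steps and low guidance because LCM-LoRA targets that regime):

    # Illustrative usage sketch; not part of the committed files.
    from image_generator import generate_image

    image = generate_image(
        prompt="1girl, solo, upper body, masterpiece",
        num_inference_steps=8,   # LCM-LoRA typically works well with 4-8 steps
        seed=42,
        guidance_scale=1.0,      # low guidance is usual for LCM sampling
    )
    image.save("sample.png")
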
inference (2).py ADDED
@@ -0,0 +1,86 @@
+ from typing import Optional
+ from fastapi import FastAPI, HTTPException, Body
+ from fastapi.responses import JSONResponse
+ from fastapi.middleware.cors import CORSMiddleware
+ import uvicorn
+ from pyngrok import ngrok
+ import os
+ from image_generator import generate_image
+ import base64
+ from io import BytesIO
+ import threading
+ import queue
+
+ app = FastAPI()
+
+ # Set up CORS
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],  # Allow all origins
+     allow_credentials=True,
+     allow_methods=["*"],  # Allow all methods
+     allow_headers=["*"],  # Allow all headers
+ )
+
+ # Incoming jobs are queued and handled one at a time by a worker thread;
+ # finished results (or error strings) are stored by request_id.
+ request_queue = queue.Queue()
+ result_queue = {}
+
+ def process_request():
+     while True:
+         request_id, data = request_queue.get()
+         try:
+             image = generate_image(
+                 data['prompt'],
+                 data['num_inference_steps'],
+                 data['seed'],
+                 data['guidance_scale'],
+                 negative_prompt=data.get('negative_prompt')
+             )
+             # Encode the PIL image as a base64 JPEG for the JSON response.
+             buffered = BytesIO()
+             image.save(buffered, format="JPEG")
+             img_str = base64.b64encode(buffered.getvalue()).decode()
+             result_queue[request_id] = img_str
+         except Exception as e:
+             result_queue[request_id] = str(e)
+         finally:
+             request_queue.task_done()
+
+ @app.post('/generate')
+ def generate(
+     prompt: str = Body(...),
+     num_inference_steps: int = Body(default=30),
+     seed: int = Body(default=42),
+     guidance_scale: float = Body(default=1.0),
+     negative_prompt: Optional[str] = Body(default=None)
+ ):
+     if not prompt:
+         raise HTTPException(status_code=400, detail="No prompt provided")
+
+     request_id = os.urandom(8).hex()
+     request_queue.put((request_id, {
+         'prompt': prompt,
+         'num_inference_steps': num_inference_steps,
+         'seed': seed,
+         'guidance_scale': guidance_scale,
+         'negative_prompt': negative_prompt
+     }))
+
+     return {'request_id': request_id}
+
+ @app.get('/result/{request_id}')
+ def get_result(request_id: str):
+     if request_id in result_queue:
+         result = result_queue.pop(request_id)
+         return {'image': result}
+     # Unlike Flask, FastAPI does not treat a (body, status) tuple as a
+     # status override, so return an explicit 202 while the job is pending.
+     return JSONResponse(status_code=202, content={'status': 'processing'})
+
+ @app.get('/get')
+ def get_status():
+     return {'status': 'Server is running'}
+
+ def start_ngrok():
+     ngrok_tunnel = ngrok.connect(8000)
+     print(' * Ngrok Tunnel URL:', ngrok_tunnel.public_url)
+
+ if __name__ == '__main__':
+     threading.Thread(target=process_request, daemon=True).start()
+     threading.Thread(target=start_ngrok, daemon=True).start()
+     uvicorn.run(app, host="0.0.0.0", port=8000)
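
A client submits a job to /generate and then polls /result until the image is ready. A minimal sketch of that flow (assuming the server is reachable at http://localhost:8000 and the requests package is installed; the URL, prompt, and output filename are placeholders):

    # Illustrative client sketch; not part of the committed files.
    import base64
    import time

    import requests

    BASE = "http://localhost:8000"

    # Submit the job; the server replies immediately with a request_id.
    resp = requests.post(f"{BASE}/generate", json={"prompt": "1girl, solo, masterpiece"})
    request_id = resp.json()["request_id"]

    # Poll /result until the worker finishes (the server answers 202 while pending).
    while True:
        r = requests.get(f"{BASE}/result/{request_id}")
        if r.status_code == 200:
            with open("result.jpg", "wb") as f:
                f.write(base64.b64decode(r.json()["image"]))
            break
        time.sleep(1)

The submit-then-poll design keeps the HTTP handlers fast while the single worker thread serializes GPU access, so only one generation runs at a time no matter how many requests arrive.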