File size: 3,048 Bytes
f4d1369
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import runpod
import io
import base64
import time
import requests
import sys
import os
import traceback
from PIL import Image
import torch

def download_image(url):
    """Download an image from *url* and return it as a PIL Image.

    Args:
        url: HTTP(S) URL pointing at an image resource.

    Returns:
        PIL.Image.Image: the decoded image (decoding is lazy until used).

    Raises:
        requests.RequestException: on network failure, timeout, or a
            non-2xx HTTP status.
        Exception: anything ``PIL.Image.open`` raises for non-image bodies.
    """
    try:
        # A timeout is essential in a serverless worker: without one a
        # stalled connection blocks the request (and bills GPU time) forever.
        response = requests.get(url, stream=True, timeout=30)
        response.raise_for_status()
        return Image.open(io.BytesIO(response.content))
    except Exception as e:
        # Log with the failing URL for context, then re-raise so the
        # handler's top-level except reports the error to the caller.
        print(f"Error downloading image from {url}: {str(e)}")
        raise

def encode_image_to_base64(image):
    """Serialize *image* as PNG and return the bytes as a base64 string."""
    with io.BytesIO() as png_buffer:
        image.save(png_buffer, format="PNG")
        raw_png = png_buffer.getvalue()
    return base64.b64encode(raw_png).decode()

def handler(event):
    """RunPod serverless entry point for the makeup-transfer model.

    Expects ``event["input"]["data"]`` to be a list:
    ``[id_image_url, makeup_image_url, guidance_scale]``, where
    ``guidance_scale`` is optional and defaults to 1.6.

    Returns:
        dict: on success ``{"status": "completed", "image": <base64 PNG>,
        "processingTime": <seconds>}``; on any failure ``{"status": "error",
        "message": <reason>}`` (never raises to the caller).
    """
    try:
        start_time = time.time()
        print("Processing request...")

        # Validate the request BEFORE the slow model import so malformed
        # events fail fast instead of paying the cold-start cost first.
        # .get() guards against events missing "input"/"data" entirely,
        # which would otherwise surface as a raw KeyError message.
        input_data = event.get("input", {}).get("data")
        if not input_data or len(input_data) < 2:
            return {
                "status": "error",
                "message": "Missing required parameters. Expected [id_image_url, makeup_image_url, guidance_scale]"
            }

        id_image_url = input_data[0]
        makeup_image_url = input_data[1]
        # guidance_scale is optional. The original code already carried this
        # 1.6 fallback, but it was unreachable behind a len(...) < 3 guard;
        # requiring only 2 items makes the documented default actually work.
        guidance_scale = float(input_data[2]) if len(input_data) > 2 else 1.6

        print(f"Downloading images from URLs...")
        id_image = download_image(id_image_url)
        makeup_image = download_image(makeup_image_url)

        # Deferred import: inference_utils pulls in the model stack, so we
        # only pay for it once the request is known to be well-formed.
        print("Importing inference module...")
        from inference_utils import inference

        print(f"Running inference with guidance scale {guidance_scale}...")
        result_image = inference(id_image, makeup_image, guidance_scale)

        processing_time = time.time() - start_time
        print(f"Processing completed in {processing_time:.2f} seconds")

        # Return the result as a base64-encoded PNG plus timing metadata.
        return {
            "status": "completed",
            "image": encode_image_to_base64(result_image),
            "processingTime": processing_time
        }

    except Exception as e:
        # Log the full traceback so worker logs show where it failed.
        print(f"Error in handler: {str(e)}")
        print(traceback.format_exc())

        # Release GPU memory so one failed request does not leave the
        # worker short of VRAM for subsequent requests.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        return {
            "status": "error",
            "message": str(e)
        }

if __name__ == "__main__":
    # Startup diagnostics: log the environment so misconfigured workers
    # (wrong working directory, missing CUDA, unset cache paths) are
    # immediately visible in the RunPod logs.
    print(f"Starting RunPod Serverless handler from {os.getcwd()}")
    print(f"Python version: {sys.version}")
    print(f"CUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        print(f"CUDA device: {torch.cuda.get_device_name(0)}")
        
    # Report cache-related environment variables (may print None if unset;
    # these control where model weights are looked up/downloaded).
    print(f"HF cache: {os.environ.get('HUGGINGFACE_HUB_CACHE')}")
    print(f"Torch home: {os.environ.get('TORCH_HOME')}")
    
    # Hand control to the RunPod serverless loop; this call blocks and
    # dispatches incoming jobs to handler().
    runpod.serverless.start({"handler": handler})