import copy
import datetime
import json
import logging
import os
import time
import uuid
from io import BytesIO

import numpy as np
import requests
import torch
from google.cloud import pubsub_v1
from google.cloud import storage
from PIL import Image, ImageSequence, ImageOps

# Set up logging
logging.basicConfig(level=logging.INFO)

# Resolve bundled resources relative to this file (not the process CWD):
# the ComfyUI workflow template that is submitted for each render job...
workflow_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "space_preview_v4.json"
)
# ...and the GCP service-account key used by the Pub/Sub and Storage clients.
config_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "gcp_config.json"
)
# Point google-cloud client auth at the bundled credentials file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config_file_path


def pil2tensor(img):
    """Convert a PIL image (possibly multi-frame) into ComfyUI-style tensors.

    Each frame becomes a float32 RGB tensor of shape (1, H, W, 3) scaled to
    [0, 1]. If the frame has an alpha band, the mask is ``1 - alpha``
    (shape (1, H, W)); otherwise a 64x64 zero mask is used as a placeholder.
    Multi-frame inputs are concatenated along dim 0.

    Returns a ``(images, masks)`` tuple of tensors.
    """
    frames = []
    masks = []
    for frame in ImageSequence.Iterator(img):
        # Honour EXIF orientation before any pixel work.
        frame = ImageOps.exif_transpose(frame)
        if frame.mode == "I":
            # 32-bit integer images: rescale into the 8-bit range first.
            frame = frame.point(lambda px: px * (1 / 255))

        rgb = np.array(frame.convert("RGB")).astype(np.float32) / 255.0
        frames.append(torch.from_numpy(rgb).unsqueeze(0))

        if "A" in frame.getbands():
            alpha = np.array(frame.getchannel("A")).astype(np.float32) / 255.0
            mask = 1.0 - torch.from_numpy(alpha)
        else:
            # No alpha channel: fall back to an empty fixed-size mask.
            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
        masks.append(mask.unsqueeze(0))

    if len(frames) > 1:
        return torch.cat(frames, dim=0), torch.cat(masks, dim=0)
    return frames[0], masks[0]


class BatchPreviewer:
    """ComfyUI node that fans a prompt out to a remote render farm.

    One job is published to Google Pub/Sub per seed; workers upload their
    result PNG to a GCS bucket via a pre-signed URL, and this node collects
    the results from a Pub/Sub subscription and returns them as four image
    tensors (the arity declared in RETURN_TYPES).
    """

    @classmethod
    def INPUT_TYPES(cls):
        # `prompt`: positive text prompt shared by every job.
        # `seeds`: comma-separated integer noise seeds, one job per seed.
        return {
            "required": {
                "prompt": (
                    "STRING",
                    {
                        "multiline": True,
                        "default": "",
                    },
                ),
                "seeds": (
                    "STRING",
                    {
                        "multiline": False,
                        "default": "",
                    },
                ),
            },
        }

    # Four separate image outputs; `process` always returns exactly four
    # tensors so the node's runtime behavior matches this declaration.
    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE")
    RETURN_NAMES = ("image 1", "image 2", "image 3", "image 4")
    FUNCTION = "process"
    OUTPUT_NODE = True
    CATEGORY = "Genera"

    def __init__(self):
        logging.info("Initializing BatchPreviewer...")
        self.publisher = pubsub_v1.PublisherClient()
        self.subscriber = pubsub_v1.SubscriberClient()
        self.topic_name = "projects/genera-408110/topics/generaspace.stageone.jobs"
        self.subscription_id = (
            "projects/genera-408110/subscriptions/generaspace.stageone.results-sub"
        )
        storage_client = storage.Client()
        self.bucket = storage_client.bucket("space-previewer")
        logging.info("BatchPreviewer initialized successfully.")

    def load_image_from_bucket(self, bucket_path):
        """Download ``bucket_path`` from the result bucket.

        Returns ``(PIL.Image, file_name)`` on success, ``(None, None)`` on
        any failure — callers must check for the sentinel.
        """
        try:
            blob = self.bucket.blob(bucket_path)
            image_data = blob.download_as_bytes()
            img = Image.open(BytesIO(image_data))
            file_name = os.path.basename(bucket_path)
            return img, file_name
        except Exception as e:
            # Use logging (not print) for consistency with the rest of the node.
            logging.error(f"Error loading image from bucket: {e}")
            return None, None

    def create_signed_url(self, file_name: str, expiration: int = 3600) -> str:
        """Generate a V4 signed PUT URL so a worker can upload ``file_name``.

        Args:
            file_name: object path inside the bucket.
            expiration: URL lifetime in seconds (default 1 hour).

        Raises:
            RuntimeError: when URL generation fails.
        """
        try:
            blob = self.bucket.blob(file_name)

            # Signed URL is only valid for a PUT of an image/png body.
            url = blob.generate_signed_url(
                version="v4",
                expiration=datetime.timedelta(seconds=expiration),
                method="PUT",
                content_type="image/png",
            )
            return url
        except Exception as error:
            logging.error(f"Error generating signed upload URL: {error}")
            raise RuntimeError("Failed to generate signed upload URL.") from error

    def process(self, prompt, seeds):
        """Publish one render job per seed, wait for results, return 4 images.

        Always returns a 4-tuple of image tensors to satisfy RETURN_TYPES:
        missing results (timeout, failures) are padded with a black
        placeholder, and extras beyond four are dropped.
        """
        logging.info("Processing job with prompt and seeds.")

        # Black 64x64 image used to pad the output up to the declared arity.
        placeholder = torch.zeros((1, 64, 64, 3), dtype=torch.float32)

        # Parse the comma-separated seed list, silently skipping
        # non-numeric entries (matches the original lenient behavior).
        seed_numbers = [
            int(seed.strip()) for seed in seeds.split(",") if seed.strip().isdigit()
        ]
        logging.info(f"Parsed seeds: {seed_numbers}")

        # Load the workflow template from JSON.
        try:
            with open(workflow_file_path) as f:
                base_workflow = json.load(f)
            logging.info("Loaded base workflow successfully.")
        except Exception as e:
            logging.error(f"Error loading workflow: {e}")
            # Still honor the 4-image contract instead of returning [].
            return (placeholder, placeholder, placeholder, placeholder)

        jobs = []
        job_ids = set()

        # Build and publish one job per seed.
        for seed in seed_numbers:
            # Deep copy is required: a shallow `.copy()` would share the
            # nested node dicts, so every job would end up with the values
            # written for the last seed.
            workflow = copy.deepcopy(base_workflow)

            # Node 530: prompt text; node 81: sampler noise seed;
            # node 532: randomized output filename prefix.
            if "530" in workflow:
                workflow["530"]["inputs"]["text"] = prompt
            if "81" in workflow:
                workflow["81"]["inputs"]["noise_seed"] = seed
            if "532" in workflow:
                workflow["532"]["inputs"]["filename_prefix"] = str(uuid.uuid4())[:4]

            job_id = str(uuid.uuid4())
            job_ids.add(job_id)

            # Workers upload their result to this pre-signed destination.
            result_url = self.create_signed_url(f"stage-one/{job_id}/result.png")

            job = {
                "generationId": job_id,
                "resultUrl": result_url,
                "data": {"client_id": str(uuid.uuid4()), "prompt": workflow},
            }

            # Persist the most recent job to disk as a debugging sample.
            sample_file_path = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "job_sample.json"
            )
            try:
                with open(sample_file_path, "w") as json_file:
                    json.dump(job, json_file, indent=4)
                logging.info(f"Saved job sample to {sample_file_path}")
            except Exception as e:
                logging.error(f"Failed to save job sample: {e}")

            jobs.append(job)

            # Publish job to Pub/Sub.
            try:
                message = json.dumps(job).encode("utf-8")
                self.publisher.publish(self.topic_name, message, job_id=job_id)
                logging.info(f"Published job with ID {job_id} to Pub/Sub.")
            except Exception as e:
                logging.error(f"Error publishing job {job_id}: {e}")

        received_images = []

        def callback(message):
            # Runs on the subscriber client's worker thread.
            try:
                data = json.loads(message.data.decode("utf-8"))
                job_id = data["generationId"]
                if job_id not in job_ids:
                    # Result for a job from another run; drop it.
                    message.ack()
                    return

                bucket_path = f"stage-one/{job_id}/result.png"
                img, _name = self.load_image_from_bucket(bucket_path)
                if img is None:
                    # Download failed: nack so Pub/Sub redelivers later
                    # instead of crashing on pil2tensor(None).
                    message.nack()
                    return

                img_out, _mask_out = pil2tensor(img)
                received_images.append(img_out)
                job_ids.remove(job_id)
                message.ack()
            except Exception as e:
                logging.error(f"Error processing message: {e}")
                message.nack()

        # Listen for responses, polling until all jobs finish or we time out.
        streaming_pull_future = self.subscriber.subscribe(
            self.subscription_id, callback=callback
        )
        poll_interval = 5  # Seconds between checks.
        max_wait_time = 300  # Total max wait time (adjust as needed).
        elapsed_time = 0

        logging.info("Listening for responses from Pub/Sub...")
        while job_ids and elapsed_time < max_wait_time:
            time.sleep(poll_interval)
            elapsed_time += poll_interval

        streaming_pull_future.cancel()  # Stop the subscription.

        # Pad/truncate so the arity always matches the four declared outputs.
        while len(received_images) < 4:
            received_images.append(placeholder)
        return tuple(received_images[:4])


# ComfyUI registration: maps the internal node key to its implementation...
NODE_CLASS_MAPPINGS = {
    "Genera.BatchPreviewer": BatchPreviewer,
}

# ...and to the human-readable name shown in the node picker UI.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Genera.BatchPreviewer": "Batch Previewer",
}
