import json
import os
import uuid
import random
import logging
from google.cloud import pubsub_v1
from google.cloud import storage
import time
import torch
from io import BytesIO
import datetime
import re
import copy
import safetensors.torch

# Set up logging
logging.basicConfig(level=logging.INFO)

# Absolute path to the ComfyUI API-format workflow JSON shipped next to this module.
workflow_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "PDP_stage_1_v1_api.json"
)
# GCP service-account credentials file, expected one directory above this module.
config_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../gcp_config.json"
)
# Point the Google client libraries at the credentials file for this process.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config_file_path

# Maps a human-readable field name to the node id (a string key) inside the
# workflow JSON whose "inputs" get overwritten per job in PDPStage1.process.
mapper = {
    "face_desc": "1021",
    "top_desc": "997",
    "top_2_desc": "998",
    "top_2_enable": "1383",
    "bottom_desc": "999",
    "bottom_enable": "1384",
    "shoes_desc": "1000",
    "env_prompt": "1028",
    "framing_selector": "4432",
    "framing_prompt": "4457",
    "seed": "4374",
}

class PDPStage1:
    """ComfyUI node that fans PDP stage-1 render jobs out over Google Pub/Sub.

    For every requested "shot" the node deep-copies a base workflow JSON,
    patches its prompt/seed nodes (node ids come from the module-level
    ``mapper``), publishes the job to a Pub/Sub topic, then blocks until the
    workers have uploaded a latent + description pair to the
    ``space-previewer`` bucket (or a timeout elapses).
    """

    # Worker-side framing selector value per framing input name:
    # "1" = close-up, "2" = mid shot, "3" = full body.
    _SHOT_TYPE_MAP = {
        "close_up_prompt": "1",
        "mid_shot": "2",
        "full_body": "3",
    }

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's linkable string inputs for ComfyUI."""
        return {
            "required": {
                "face_description": ("STRING", {"forceInput": True}),  # Linkable input
                "top_description": ("STRING", {"forceInput": True}),
                "environment_prompt": ("STRING", {"forceInput": True}),
            },
            "optional": {
                "top2_description": ("STRING", {"forceInput": True}),
                "bottom_description": ("STRING", {"forceInput": True}),
                "shoes_description": ("STRING", {"forceInput": True}),
                # Framing inputs: "N" or "N:extra prompt text" per shot type.
                "close_up_prompt": ("STRING", {"forceInput": True}),
                "mid_shot": ("STRING", {"forceInput": True}),
                "full_body": ("STRING", {"forceInput": True}),
            },
        }

    # ComfyUI node contract.
    RETURN_TYPES = ("LATENT", "STRING")
    RETURN_NAMES = ("latents", "descriptions")
    FUNCTION = "process"
    OUTPUT_NODE = False
    CATEGORY = "Genera"

    def __init__(self):
        """Create the Pub/Sub publisher/subscriber clients and bind the result bucket."""
        logging.info("Initializing PDPStage1...")
        self.publisher = pubsub_v1.PublisherClient()
        self.subscriber = pubsub_v1.SubscriberClient()
        self.topic_name = "projects/genera-408110/topics/generaspace.stageone.jobs"
        self.subscription_id = (
            "projects/genera-408110/subscriptions/generaspace.stageone.results-sub"
        )
        storage_client = storage.Client()
        self.bucket = storage_client.bucket("space-previewer")
        logging.info("PDPStage1 initialized successfully.")

    def load_comfy(self, file):
        """Load a ComfyUI latent tensor from a safetensors file path or raw bytes.

        Args:
            file: Path to a .latent file (str), or the raw safetensors bytes.

        Returns:
            A float32 tensor on success, or None if loading/validation failed.
        """
        try:
            # A string is treated as a path; anything else as in-memory bytes.
            if isinstance(file, str):
                data = safetensors.torch.load_file(file)
            else:
                data = safetensors.torch.load(file)

            if not data:
                raise ValueError("Safetensors data is empty or invalid.")
            if "latent_tensor" not in data:
                raise KeyError("Key 'latent_tensor' not found in safetensors data.")

            latent = data["latent_tensor"].to(torch.float32)

            # Files without the version marker store unscaled latents and
            # need the SDXL scaling factor applied.
            if "latent_format_version_0" not in data:
                latent *= 1.0 / 0.18215  # XL scaling

            return latent
        except Exception as e:
            logging.error(f"Error loading safetensors file: {e}")
            return None

    def load_latent_from_bucket(self, bucket_path):
        """Download a .latent blob and wrap it as a ComfyUI LATENT dict.

        Returns:
            {"samples": tensor} with a leading batch dimension, or None on failure.
        """
        try:
            blob = self.bucket.blob(bucket_path)
            latent_bytes = blob.download_as_bytes()

            # download_as_bytes already yields raw bytes; hand them straight
            # to the safetensors loader.
            latent = self.load_comfy(latent_bytes)
            if latent is None:
                raise ValueError("Loaded latent is None.")

            # ComfyUI expects a batch dimension: (B, C, H, W).
            if len(latent.shape) == 3:
                latent = latent.unsqueeze(0)

            return {"samples": latent.to(torch.float32)}
        except Exception as e:
            logging.error(f"Error loading latent from bucket: {e}")
            return None

    def load_description_from_bucket(self, bucket_path):
        """Download a UTF-8 text blob and return its contents, or None on failure."""
        try:
            blob = self.bucket.blob(bucket_path)
            text_data = blob.download_as_bytes()
            return text_data.decode("utf-8")
        except Exception as e:
            logging.error(f"Error loading description from bucket: {e}")
            return None

    def create_signed_url(
        self, file_name: str, content_type: str, expiration: int = 3600
    ) -> str:
        """Return a V4 signed PUT URL so a worker can upload `file_name`.

        Args:
            file_name: Object path inside the result bucket.
            content_type: Content type the uploader must send.
            expiration: URL lifetime in seconds (default one hour).

        Raises:
            RuntimeError: if the signed URL could not be generated.
        """
        try:
            blob = self.bucket.blob(file_name)
            expiration_time = datetime.timedelta(seconds=expiration)
            return blob.generate_signed_url(
                version="v4",
                expiration=expiration_time,
                method="PUT",
                content_type=content_type,
            )
        except Exception as error:
            logging.error(f"Error generating signed upload URL: {error}")
            raise RuntimeError("Failed to generate signed upload URL.") from error

    def _build_shots(self, close_up_prompt, mid_shot, full_body):
        """Expand the framing inputs into one shot dict per requested render.

        Each non-empty input must look like "N" or "N:extra info", where N is
        the number of shots of that framing to render.

        Returns:
            List of {"id", "info", "type", "seed"} dicts.

        Raises:
            ValueError: if no framing input is given or one is malformed.
        """
        if close_up_prompt is None and mid_shot is None and full_body is None:
            raise ValueError(
                "Invalid framing input: At least one of 'close_up_prompt', 'mid_shot', or 'full_body' must be provided."
            )

        pattern = re.compile(r"^(\d+)(?::(.+))?$")
        shots = []
        for framing_input, name in (
            (close_up_prompt, "close_up_prompt"),
            (mid_shot, "mid_shot"),
            (full_body, "full_body"),
        ):
            if not framing_input:
                continue
            match = pattern.match(framing_input)
            if not match:
                raise ValueError(
                    f"Invalid format for {name}: '{framing_input}' must be a string number or in the format 'number:info'."
                )

            num_shots = int(match.group(1))
            shot_info = match.group(2) or ""
            shot_type = self._SHOT_TYPE_MAP[name]

            for _ in range(num_shots):
                shot = {
                    "id": str(uuid.uuid4()),
                    "info": shot_info,
                    "type": shot_type,
                    "seed": random.randint(1, 999_999_999),
                }
                logging.info(
                    f"Shot ID: {shot['id']}, Type: {shot['type']}, Info: {shot['info']}, Seed: {shot['seed']}"
                )
                shots.append(shot)
        return shots

    def _configure_workflow(
        self,
        base_workflow,
        shot,
        face_description,
        top_description,
        environment_prompt,
        top2_description,
        bottom_description,
        shoes_description,
    ):
        """Return a deep copy of `base_workflow` patched for one shot."""
        workflow = copy.deepcopy(base_workflow)

        workflow[mapper["face_desc"]]["inputs"]["String"] = face_description
        workflow[mapper["top_desc"]]["inputs"]["String"] = top_description

        # Optional garments toggle their enable flags alongside the text.
        if top2_description:
            workflow[mapper["top_2_desc"]]["inputs"]["String"] = top2_description
            workflow[mapper["top_2_enable"]]["inputs"]["value"] = True
        else:
            workflow[mapper["top_2_enable"]]["inputs"]["value"] = False

        if bottom_description:
            workflow[mapper["bottom_desc"]]["inputs"]["String"] = bottom_description
            workflow[mapper["bottom_enable"]]["inputs"]["value"] = True
        else:
            workflow[mapper["bottom_enable"]]["inputs"]["value"] = False

        if shoes_description:
            workflow[mapper["shoes_desc"]]["inputs"]["String"] = shoes_description

        workflow[mapper["env_prompt"]]["inputs"]["string"] = environment_prompt
        workflow[mapper["framing_selector"]]["inputs"]["Number"] = shot["type"]
        workflow[mapper["seed"]]["inputs"]["noise_seed"] = shot["seed"]

        if shot["info"]:
            # The framing prompt node holds one line per shot type:
            # line 0 = close-up, line 1 = mid shot, line 2 = full body.
            framing_string = workflow[mapper["framing_prompt"]]["inputs"]["String"]
            lines = framing_string.split("\n")
            line_index = int(shot["type"]) - 1
            lines[line_index] = f"{lines[line_index]} {shot['info']}".strip()
            workflow[mapper["framing_prompt"]]["inputs"]["String"] = "\n".join(lines)

        return workflow

    def _collect_results(self, job_ids, max_wait_time=300, poll_interval=5):
        """Wait for completion messages for every job in `job_ids`.

        Consumes Pub/Sub result messages, downloading the latent and
        description for each matching job id. Gives up after `max_wait_time`
        seconds. `job_ids` is mutated as jobs complete.

        Returns:
            (latents, descriptions) — only successfully loaded entries.
        """
        latents = []
        descriptions = []

        def callback(message):
            try:
                data = json.loads(message.data.decode("utf-8"))
                logging.info(f"On message {data}.")
                job_id = data["id"]
                if job_id in job_ids:
                    latent = self.load_latent_from_bucket(
                        f"stage-one/{job_id}/image.latent"
                    )
                    # Skip failed downloads instead of collecting None entries.
                    if latent is not None:
                        latents.append(latent)

                    description = self.load_description_from_bucket(
                        f"stage-one/{job_id}/description.txt"
                    )
                    if description is not None:
                        descriptions.append(description)

                    job_ids.remove(job_id)
                # Ack both matching and unrelated messages so they don't redeliver.
                message.ack()
            except Exception as e:
                logging.error(f"Error processing message: {e}")
                message.nack()

        streaming_pull_future = self.subscriber.subscribe(
            self.subscription_id, callback=callback
        )
        elapsed_time = 0
        logging.info("Listening for responses from Pub/Sub...")
        while job_ids and elapsed_time < max_wait_time:
            time.sleep(poll_interval)
            elapsed_time += poll_interval

        streaming_pull_future.cancel()  # Stop the subscription
        return latents, descriptions

    def process(
        self,
        face_description,
        top_description,
        environment_prompt,
        top2_description=None,
        bottom_description=None,
        shoes_description=None,
        close_up_prompt=None,
        mid_shot=None,
        full_body=None,
    ):
        """Publish one stage-1 job per requested shot and wait for outputs.

        Returns:
            (latents, descriptions): list of latent sample tensors and list of
            description strings ([""] if none arrived before the timeout).

        Raises:
            ValueError: if the framing inputs are missing or malformed.
        """
        shots = self._build_shots(close_up_prompt, mid_shot, full_body)

        logging.info("Processing job with prompt.")
        try:
            with open(workflow_file_path) as f:
                base_workflow = json.load(f)
            logging.info("Loaded base workflow successfully.")
        except Exception as e:
            logging.error(f"Error loading workflow: {e}")
            # Fail soft with empty outputs shaped like RETURN_TYPES.
            return [], [""]

        job_ids = set()
        for shot in shots:
            workflow = self._configure_workflow(
                base_workflow,
                shot,
                face_description,
                top_description,
                environment_prompt,
                top2_description,
                bottom_description,
                shoes_description,
            )

            # Signed PUT URLs the worker uses to upload this shot's results.
            latent_url = self.create_signed_url(
                f"stage-one/{shot['id']}/image.latent", "application/octet-stream"
            )
            description_url = self.create_signed_url(
                f"stage-one/{shot['id']}/description.txt", "text/plain"
            )

            job = {
                "id": shot["id"],
                "name": "pdp:stage-1",
                "result": {"latent": latent_url, "description": description_url},
                "data": {"client_id": str(uuid.uuid4()), "prompt": workflow},
            }

            # Persist the last published job next to this module for debugging.
            sample_file_path = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "job_sample.json"
            )
            try:
                with open(sample_file_path, "w") as json_file:
                    json.dump(job, json_file, indent=4)
                logging.info(f"Saved job sample to {sample_file_path}")
            except Exception as e:
                logging.error(f"Failed to save job sample: {e}")

            job_ids.add(job["id"])

            # Publish job to Pub/Sub.
            try:
                message = json.dumps(job).encode("utf-8")
                self.publisher.publish(self.topic_name, message, job_id=shot["id"])
                logging.info(f"Published job with ID {shot['id']} to Pub/Sub.")
            except Exception as e:
                logging.error(f"Error publishing job {shot['id']}: {e}")

        latents, descriptions = self._collect_results(job_ids)

        # Guard against any None/odd entries so indexing cannot raise here.
        latents_out = [l["samples"] for l in latents if l and "samples" in l]
        descriptions_out = descriptions if descriptions else [""]

        return latents_out, descriptions_out
