import os
import cv2
import torch
import numpy as np
from torch.utils.data import DataLoader, Dataset
import lightning as L
import torch.nn.functional as F
from lightning.fabric.fabric import _FabricOptimizer
from lightning.fabric.loggers import TensorBoardLogger

from myyolov8.ultralytics import YOLO
from mysam.OSM.model import Model  # Your custom segmentation model
from mysam.OSM.config import cfg
class ImageSplitterDataset(Dataset):
    """Split full-size images into fixed-size overlapping tiles.

    Each dataset item is one tile, returned as a [0, 1]-normalized float
    tensor in (C, H, W) order, together with metadata locating the tile in
    its source image.
    """

    def __init__(self, images, imgids, tile_size=1024, overlap=204):
        """
        Args:
            images (sequence of np.ndarray): Images of shape (H, W, C),
                uint8 in [0, 255].
            imgids (sequence): One identifier per image.
            tile_size (int): Side length of each square tile.
            overlap (int): Overlap in pixels between neighbouring tiles;
                the tile stride is ``tile_size - overlap``.
        """
        self.tile_size = tile_size
        self.overlap = overlap
        self.images = images
        # Each entry is (image_index, image_id, x, y): the top-left corner of
        # one tile.  The grid is computed per image so differently sized
        # images tile correctly (previously only images[0]'s size was used).
        stride = tile_size - overlap
        self.tiles_info = []
        for i, (img_id, image) in enumerate(zip(imgids, images)):
            h, w = image.shape[:2]
            for y in range(0, h, stride):
                for x in range(0, w, stride):
                    self.tiles_info.append((i, img_id, x, y))

    def __len__(self):
        return len(self.tiles_info)

    def __getitem__(self, idx):
        """Return ``(tile_tensor, (image_index, image_id, x, y))``.

        The tile tensor has shape (C, tile_size, tile_size), scaled to
        [0, 1]; tiles at the right/bottom image edges are zero-padded.
        """
        img_idx, img_id, x, y = self.tiles_info[idx]
        image = self.images[img_idx]
        h, w, n_channels = image.shape

        tile = image[
            y: min(y + self.tile_size, h),
            x: min(x + self.tile_size, w),
        ]

        # Zero-pad edge tiles to a full tile_size square so batches stack.
        # Channel count follows the input instead of being hard-coded to 4.
        padded_tile = np.zeros((self.tile_size, self.tile_size, n_channels),
                               dtype=np.uint8)
        padded_tile[:tile.shape[0], :tile.shape[1]] = tile

        return (
            torch.tensor(padded_tile, dtype=torch.float32).permute(2, 0, 1) / 255.0,
            (img_idx, img_id, x, y),
        )
def create_dataloader(images, imgids, tile_size, overlap, batch_size=1):
    """Wrap an ImageSplitterDataset in a deterministic (unshuffled) DataLoader."""
    tiles = ImageSplitterDataset(images, imgids, tile_size, overlap)
    return DataLoader(
        tiles,
        batch_size=batch_size,
        shuffle=False,
        collate_fn=collate_fn,
    )
def collate_fn(batch):
    """Stack tile tensors into one batch tensor; keep metadata as a tuple."""
    tiles = [sample[0] for sample in batch]
    metas = tuple(sample[1] for sample in batch)
    return torch.stack(tiles), metas
class DetectAndSegmentAPI:
    """Detect objects with YOLO on image tiles, then segment them with SAM.

    Large RGB-D images are split into overlapping tiles, boxes are detected
    per tile (RGB channels only), masks are predicted per box from the full
    4-channel tile, and the tile masks are stitched back into one
    full-resolution binary mask per input image id.
    """

    def __init__(self, yolo_model_path, sam_model_path, tile_size=1024, overlap=204):
        """
        Initialize the API with YOLO and segmentation models.

        Args:
            yolo_model_path (str): Path to the YOLO detection weights.
            sam_model_path (str): Path to the SAM segmentation checkpoint.
            tile_size (int): Side length of the square tiles fed to the models.
            overlap (int): Overlap (in pixels) between neighbouring tiles.
        """
        # Set on every rank (previously only on rank 0, leaving the attributes
        # missing in non-zero-rank processes).
        self.tile_size, self.overlap = tile_size, overlap
        self.yolo_model = YOLO(yolo_model_path)

        fabric = L.Fabric(accelerator="auto",
                          devices=cfg.num_devices,
                          strategy="auto",
                          loggers=[TensorBoardLogger(cfg.out_dir, name="lightning-sam")])
        fabric.launch()
        fabric.seed_everything(1337 + fabric.global_rank)
        cfg.model.checkpoint = sam_model_path
        model = Model(cfg)
        model.setup(fabric.device)
        model.eval()
        if fabric.global_rank == 0:
            os.makedirs(cfg.out_dir, exist_ok=True)
            self.segmentation_model = model.cuda()
            self.segmentation_model.eval()

    def process_batch(self, batch_images, image_ids, tile_size=1024, overlap=204, split_batch_size=1):
        """
        Process a batch of images to detect objects and segment them.

        Args:
            batch_images (np.ndarray): Array of shape (batch, H, W, 4),
                values in [0, 255], RGB (not BGR) plus a depth channel.
            image_ids (sequence): One identifier per image; these become the
                keys of the returned dict.
            tile_size (int): Side length of each tile.
            overlap (int): Overlap between neighbouring tiles.
            split_batch_size (int): Tiles per forward pass.  NOTE(review):
                tiles with zero detections are dropped from ``bboxes`` below,
                so masks only line up with ``meta_infos`` when this is 1 —
                keep the default unless that bookkeeping is fixed.

        Returns:
            dict: image_id -> (H, W) uint8 CUDA tensor, 1 where any mask fired.
        """
        batch_size, orig_h, orig_w, n_channel = batch_images.shape
        dataloader = create_dataloader(batch_images, image_ids, tile_size, overlap, split_batch_size)
        combined_masks = {}
        for tiles, meta_infos in dataloader:  # tiles: (b, c, tile_size, tile_size)
            bb, bc, bh, bw = tiles.shape
            tiles = tiles.cuda()
            # YOLO only sees the RGB channels; depth is reserved for SAM.
            with torch.no_grad():
                results = self.yolo_model.predict(tiles[:, :3, ...], save=False, save_txt=False, conf=0.25)

            bboxes = []
            class_types = []
            for result in results:
                tile_boxes = []
                tile_classes = []
                for box in result.boxes:
                    tile_boxes.append(box.xyxy[0])  # [x1, y1, x2, y2]
                    tile_classes.append(int(box.cls.item()))
                # Tiles without detections are skipped entirely; see the
                # split_batch_size note in the docstring.
                if tile_boxes:
                    bboxes.append(torch.stack(tile_boxes, dim=0))
                    class_types.append(tile_classes)

            if len(bboxes) > 0:
                bboxes_tensor = torch.stack(bboxes, dim=0).cuda()  # (tiles, N, 4)
                with torch.no_grad():
                    masks, _ = self.segmentation_model(tiles, bboxes_tensor)
            else:
                # No detections anywhere in this sub-batch: one all-zero mask.
                masks = torch.zeros(1, 1, tile_size, tile_size).cuda()

            # Stitch tile masks back into the per-image masks, OR-ing the
            # overlap regions via torch.maximum.
            for mask, meta_info in zip(masks, meta_infos):
                image_idx, image_id, x, y = meta_info
                mask = F.sigmoid(mask)
                mask = torch.clamp(mask, min=0, max=1)
                mask = (mask >= 0.5).to(torch.uint8)
                # Union over the per-box mask channel.
                mask = torch.any(mask, dim=0).byte()
                if image_id not in combined_masks:
                    combined_masks[image_id] = torch.zeros((orig_h, orig_w), dtype=torch.uint8).cuda()
                y_end, x_end = min(y + bh, orig_h), min(x + bw, orig_w)
                combined_masks[image_id][y:y_end, x:x_end] = torch.maximum(
                    combined_masks[image_id][y:y_end, x:x_end],
                    mask[: y_end - y, : x_end - x],
                )
            del tiles

        del dataloader
        return combined_masks

    def save_masks(self, combined_masks, output_folder, filenames):
        """
        Save the segmentation masks to the output folder as PNGs.

        Args:
            combined_masks (dict): image_id -> binary mask tensor, as
                returned by ``process_batch``.
            output_folder (str): Directory where the masks are written.
            filenames: Unused; kept for backward compatibility with callers.
        """
        os.makedirs(output_folder, exist_ok=True)
        for image_id, mask in combined_masks.items():
            # Bug fix: the path was the constant "(unknown)_mask.png" (an
            # f-string with no placeholder), so every mask overwrote the
            # same file.  Name each file after its image id instead.
            save_path = os.path.join(output_folder, f"{image_id}_mask.png")
            cv2.imwrite(save_path, mask.cpu().numpy() * 255)
        del combined_masks

# Usage Example
if __name__ == "__main__":
    # Model / data locations.
    input_folder = "data_zoo/pcbcoco/images/val"
    yolo_model_path = "weight/pcbhbbyolo.pt"
    output_folder = "segout"
    sam_model_path = "weight/sam_hq_vit_l.pth"
    api = DetectAndSegmentAPI(yolo_model_path, sam_model_path, tile_size=1024, overlap=204)

    def load_rgbd(rgb_path, depth_path):
        # Stack a grayscale depth channel onto the color image -> (H, W, 4).
        color = cv2.imread(rgb_path)
        depth = cv2.imread(depth_path, cv2.IMREAD_GRAYSCALE)
        return np.concatenate([color, depth[..., np.newaxis]], axis=-1)

    # Build a batch of two RGB-D images.
    batch_images = np.stack(
        [
            load_rgbd("data_zoo/pcbcoco/images/train/160_0_0_image3.png",
                      "data_zoo/pcbcoco/depth/val/160_0_0_image3_depth.jpg"),
            load_rgbd("data_zoo/pcbcoco/images/train/160_0_1_image3.png",
                      "data_zoo/pcbcoco/depth/val/160_0_1_image3_depth.jpg"),
        ],
        axis=0,
    )
    image_ids = ["q1", "w2"]

    # Process and save results.
    masks = api.process_batch(batch_images, image_ids)
    api.save_masks(masks, output_folder="output_masks", filenames="12321")
