import glob
import json
import os
import random

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

from model.segment_anything.utils.transforms import ResizeLongestSide
from torchvision import transforms
from utils.dataset import Resize

def build_rsris_batches(dataroot, setname):
    """Load image paths, mask paths and referring sentences for RefSegRS.

    Parses ``output_phrase_<setname>.txt`` under *dataroot*. Each line has
    the form ``<image_id> <word> <word> ...``; the image and its mask are
    expected at ``images/<image_id>.tif`` and ``masks/<image_id>.tif``.

    Args:
        dataroot: Root directory containing ``images/``, ``masks/`` and the
            ``output_phrase_*.txt`` split files.
        setname: Dataset split, one of ``"train"``, ``"val"``, ``"test"``.

    Returns:
        Three parallel lists: image paths, mask paths, and sentences.

    Raises:
        ValueError: If *setname* is not a recognized split (the original
            code raised an obscure NameError in this case).
        OSError: If the split file cannot be opened.
    """
    if setname not in ("train", "val", "test"):
        raise ValueError(f"unknown split name: {setname!r}")

    image_dir = os.path.join(dataroot, "images")
    mask_dir = os.path.join(dataroot, "masks")
    split_file = os.path.join(dataroot, f"output_phrase_{setname}.txt")

    all_imgs1 = []
    all_labels = []
    all_sentences = []

    with open(split_file, "r") as rf:
        for line in rf:
            # First token is the image id; the rest is the referring phrase.
            # rstrip the newline so sentences don't carry a trailing '\n'.
            parts = line.rstrip("\n").split(" ")
            image_id = parts[0]
            all_imgs1.append(os.path.join(image_dir, image_id + ".tif"))
            all_labels.append(os.path.join(mask_dir, image_id + ".tif"))
            all_sentences.append(" ".join(parts[1:]))

    print("Dataset Loaded.")
    return all_imgs1, all_labels, all_sentences


class RefSegRSDataset(torch.utils.data.Dataset):
    """Referring-expression segmentation dataset over the RefSegRS "train" split.

    Each item yields the raw image path, a SAM-preprocessed image tensor, an
    EVF-preprocessed image tensor, a binary ground-truth mask, an ignore
    label map, the post-resize spatial size, and the referring sentence(s).

    NOTE(review): ``__getitem__`` ignores ``idx`` and samples a random item
    each call — presumably intentional for this training pipeline; confirm
    before relying on deterministic indexing.
    """

    # SAM-style per-channel normalization constants (0-255 pixel range).
    # todo: check mean and std
    pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
    pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
    img_size = 1024       # square side length the SAM branch pads to
    ignore_label = 255    # fill value for the per-pixel ignore map

    def __init__(
        self,
        base_image_dir,
        precision: str = "fp32",
        image_size: int = 224,
        num_classes_per_sample: int = 3,
        model_type="ori",
        transform=ResizeLongestSide(1024),
    ):
        """Build the dataset.

        Args:
            base_image_dir: Directory containing the ``refsegrs`` data root.
            precision: Stored for callers; not used inside this class.
            image_size: Side length of the EVF branch's square resize.
            num_classes_per_sample: Stored for callers; not used here.
            model_type: ``"ori"`` expects a ``ResizeLongestSide`` transform
                (SAM-style resize + pad); anything else expects ``Resize``.
            transform: Image transform for the SAM branch; its type must
                match ``model_type`` (asserted below).
        """
        if model_type == "ori":
            assert isinstance(transform, ResizeLongestSide)
        else:
            assert isinstance(transform, Resize)
        self.model_type = model_type
        self.num_classes_per_sample = num_classes_per_sample

        self.base_image_dir = base_image_dir
        self.precision = precision
        self.transform = transform
        # interpolation=3 is PIL bicubic resampling.
        self.image_preprocessor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((image_size, image_size), interpolation=3, antialias=None),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])

        self.DATA_ROOT = os.path.join(base_image_dir, "refsegrs")

        all_imgs, all_labels, all_sentences = build_rsris_batches(self.DATA_ROOT, "train")
        self.images = all_imgs
        self.labels = all_labels
        self.sentences = all_sentences

    def __len__(self):
        # BUG FIX: the original returned len(self.label_paths), but
        # __init__ never sets `label_paths` (it sets `images`/`labels`/
        # `sentences`), so __len__ always raised AttributeError.
        return len(self.images)

    def preprocess(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize pixel values and (for "ori") pad to a square input.

        Args:
            x: Image tensor of shape (C, H, W) with values in 0-255.

        Returns:
            Normalized tensor; when ``model_type == "ori"`` it is
            bottom/right zero-padded to ``img_size`` x ``img_size``.
        """
        # Normalize colors.
        x = (x - self.pixel_mean) / self.pixel_std

        if self.model_type == "ori":
            # Pad bottom and right edges only, so content stays top-left.
            h, w = x.shape[-2:]
            padh = self.img_size - h
            padw = self.img_size - w
            x = F.pad(x, (0, padw, 0, padh))
        return x

    def __getitem__(self, idx):
        """Return one randomly-sampled training example (``idx`` unused)."""
        number = random.randrange(len(self.images))
        image_path = self.images[number]
        label_path = self.labels[number]
        sents = [self.sentences[number]]

        # Flag 2 = cv2.IMREAD_ANYDEPTH; binarize at a threshold of 50.
        label_mask = cv2.imread(label_path, 2)
        if label_mask is None:
            raise FileNotFoundError(f"cannot read mask: {label_path}")
        # Equivalent to the original zeros/assign dance: boolean > uint8.
        masks = (label_mask > 50).astype(np.uint8)[np.newaxis, :]

        image = cv2.imread(image_path)
        if image is None:
            raise FileNotFoundError(f"cannot read image: {image_path}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Preprocess image for EVF branch.
        image_evf = self.image_preprocessor(image)

        image = self.transform.apply_image(image)  # preprocess image for sam
        resize = image.shape[:2]

        image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())

        masks = torch.from_numpy(masks)
        # Per-pixel map filled entirely with the ignore value.
        label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label
        return (
            image_path,
            image,
            image_evf,
            masks,
            label,
            resize,
            sents,
        )
