from torch.utils import data
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
from PIL import Image
import torch
import os
import random

# Prompt templates for turning a raw style keyword into a natural-language
# sentence; "{}" is filled with the style keyword in __getitem__. One template
# is sampled uniformly at random per item (prompt-ensembling, CLIP-style).
label_templates = [
    'a photo of {} style.',
    'a drawing of {} style.',
    'style of {} in a photo.',
    'the photo describes a {} style.',
    '{} style.',
    'the drawing is style of {}.',
    'the photo is {} style.',
    '{} drawing.'
]


def style_collate_fn(batch):
    """Collate (content, style, text) samples into a training batch.

    Args:
        batch: list of (content_img, style_img, style_txt) tuples, where the
            two images are CHW tensors of identical shape and style_txt is str.

    Returns:
        Tuple of (content_batch, style_batch, texts): two NCHW tensors stacked
        along a new batch dimension, and the list of prompt strings.
    """
    # NOTE: the parameter was renamed from `data`, which shadowed the
    # `torch.utils.data` module imported at the top of the file.
    content_imgs, style_imgs, texts = zip(*batch)
    # torch.stack adds the batch dim directly, replacing the manual
    # view(1, C, H, W) + cat of the original (and works on non-contiguous
    # tensors, which .view would reject).
    return torch.stack(content_imgs, dim=0), torch.stack(style_imgs, dim=0), list(texts)


class StyleClipDataset(data.Dataset):
    """Pairs each content image with a randomly sampled style image + prompt.

    Length and indexing follow the content list; the style image and its
    templated text prompt are re-sampled randomly on every __getitem__ call.
    """

    def __init__(self, style_image_dir, style_filepath, content_image_dir, content_filepath, transform):
        """
        Args:
            style_image_dir: directory holding the style images.
            style_filepath: text file, one "filename;style keyword" per line.
                ';' is the separator because style keywords may contain ','.
            content_image_dir: directory holding the content images.
            content_filepath: text file, one content image filename per line.
            transform: callable applied to both PIL images in __getitem__.
        """
        self.style_image_dir = style_image_dir
        self.content_image_dir = content_image_dir
        self.transform = transform

        # Map style keyword -> list of filenames carrying that style.
        self.style_data = {}
        with open(style_filepath) as f:
            for line in f:  # iterate lazily instead of readlines()
                filename, style_txt = line.strip().split(';')
                self.style_data.setdefault(style_txt, []).append(filename)
        # NOTE: despite the name, this counts distinct style keywords, not
        # style images; the name is kept for backward compatibility.
        self.num_style_images = len(self.style_data)
        # Hoisted out of __getitem__: the original rebuilt
        # list(self.style_data.keys()) for every sample drawn.
        self.style_keys = list(self.style_data)

        self.content_data = [line.strip() for line in open(content_filepath)]
        self.num_content_images = len(self.content_data)

    def __len__(self):
        """Dataset length = number of content images."""
        return self.num_content_images

    def __getitem__(self, index):
        """Return (content_tensor, style_tensor, prompt) for content index.

        The style is sampled uniformly over style keywords, then uniformly
        over that keyword's images — so styles with fewer images are NOT
        down-weighted (same behavior as the original implementation).
        """
        content_filename = self.content_data[index]
        content_image = Image.open(os.path.join(self.content_image_dir, content_filename)).convert('RGB')
        style_txt = random.choice(self.style_keys)
        style_filename = random.choice(self.style_data[style_txt])
        style_image = Image.open(os.path.join(self.style_image_dir, style_filename)).convert('RGB')
        # Wrap the raw style keyword in a randomly chosen prompt template.
        prompt = random.choice(label_templates).format(style_txt)
        return self.transform(content_image), self.transform(style_image), prompt


def get_loader(style_image_dir, style_filepath, content_image_dir, content_filepath, crop_size=256, image_size=224, 
               batch_size=16, mode='train', num_workers=1):
    """Build and return a data loader.

    In 'train' mode the pipeline is flip -> center-crop -> resize; in any
    other mode only the resize is applied.
    NOTE(review): outside 'train' mode, Resize(int) scales the shorter side
    only, so batched images may differ in size — confirm eval callers use
    batch_size=1 or square inputs.
    """
    # Normalization statistics matching the CLIP model's own preprocessing.
    clip_mean = (0.48145466, 0.4578275, 0.40821073)
    clip_std = (0.26862954, 0.26130258, 0.27577711)

    steps = []
    if mode == 'train':
        steps += [T.RandomHorizontalFlip(), T.CenterCrop(crop_size)]
    steps += [T.Resize(image_size), T.ToTensor(), T.Normalize(clip_mean, clip_std)]

    dataset = StyleClipDataset(style_image_dir, style_filepath,
                               content_image_dir, content_filepath,
                               T.Compose(steps))

    # Shuffle only while training; the custom collate_fn batches the
    # (content, style, text) triples.
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=(mode == 'train'),
                           num_workers=num_workers,
                           collate_fn=style_collate_fn)