from PIL import Image
import torch.nn as nn
import torch
from torchvision import transforms
from transformers import LlamaTokenizer
from typing import Any, Dict, List, Optional, Union
from my_py_toolkit.mllms.models.seed2.seed_qformer.qformer_quantizer import Blip2QformerQuantizer
from my_py_toolkit.mllms.models.seed2.pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from my_py_toolkit.mllms.utils import modality_tokens_ids_to_string, modality_ids_to_tokens
from my_py_toolkit.decorator.decorator import fn_timer
import os

"""
Image tokenizer


Speed (measured on the "200" server — a slow machine):
batch | GPU mem | speed (s/batch)
1     | 3G      | 3.7
36    |         | 0.92
50    | 12G     |
80    | 19G     | 2.278
100   | 27G     | 1.66 / 3.7 (4M images estimated to finish in ~19h)
600   | 13G     |
9k    | 26G+    | 16


log:
0
Total time running cvt2token: 3.716559648513794 seconds
100
Total time running cvt2token: 6.882885217666626 seconds
200
Total time running cvt2token: 8.323652029037476 seconds
300
Total time running cvt2token: 8.36314058303833 seconds
400
Total time running cvt2token: 10.958420038223267 seconds
500
Total time running cvt2token: 10.830089330673218 seconds
600
Total time running cvt2token: 13.44407868385315 seconds
700
Total time running cvt2token: 13.529032707214355 seconds
800
Total time running cvt2token: 16.134167671203613 seconds
900
Total time running cvt2token: 16.014801263809204 seconds

"""

# Registry of short model names accepted by ImageTokenizerGeneral /
# SeedLlamaTokenizer; each name resolves to the model class used to load it.
model_name_class_mapping = {
    'seed': Blip2QformerQuantizer,
    'stable_unclip': StableUnCLIPImg2ImgPipeline
}

class ImageTokenizerGeneral(nn.Module):
    """Image <-> discrete-token codec.

    Wraps an image->token quantizer (e.g. the SEED Q-Former) and, optionally,
    a token->image diffusion pipeline behind a tokenizer-like interface.

    ``img2token_model_cls_or_name`` / ``token2img_model_cls_or_name`` may be
    either a model class or one of the short names registered in
    ``model_name_class_mapping`` ('seed', 'stable_unclip').
    """

    def __init__(self,
                 img2token_model_path,
                 img2token_model_cls_or_name=None,
                 token2img_model_path=None,
                 token2img_model_cls_or_name=None,
                 load_text2img_model=False,
                 image_size=224,
                 shape_latents=(1, 4, 96, 96),
                 shape_noise=(1, 1024),
                 device='cuda',
                 fp16=True,
                 **kwargs):
        # NOTE: shape defaults are tuples (not lists) to avoid the
        # mutable-default-argument pitfall; lists passed by callers still work.
        super().__init__()
        # Resolve registered short names to their model classes.
        if isinstance(img2token_model_cls_or_name, str) and img2token_model_cls_or_name in model_name_class_mapping:
            img2token_model_cls_or_name = model_name_class_mapping[img2token_model_cls_or_name]
        if isinstance(token2img_model_cls_or_name, str) and token2img_model_cls_or_name in model_name_class_mapping:
            token2img_model_cls_or_name = model_name_class_mapping[token2img_model_cls_or_name]
        if img2token_model_cls_or_name is None:
            raise ValueError('img2token_model_cls_or_name must be a model class or a registered model name')

        model = img2token_model_cls_or_name.from_pretrained(pretrained_model_path=img2token_model_path,
                                                            vit_precision='fp16' if fp16 else 'fp32',
                                                            **kwargs).eval()
        # The token->image decoder is large, so it is only loaded when
        # explicitly requested.
        if token2img_model_path is not None and load_text2img_model:
            token2img_model = token2img_model_cls_or_name.from_pretrained(
                token2img_model_path,
                torch_dtype=torch.float16 if fp16 else torch.float32)
            self.token2img_model = token2img_model.to(device)
        else:
            self.token2img_model = None

        model = model.to(device)

        # interpolation=3 is bicubic in torchvision's legacy int encoding.
        # Mean/std are the CLIP normalization constants.
        processor = transforms.Compose([
            transforms.Resize((image_size, image_size), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
        ])

        if fp16:
            model = model.half()

        # Fixed latents/noise make repeated decodes of the same indices
        # deterministic.  Match dtype to the model precision (previously
        # always float16, which was inconsistent with fp16=False runs).
        dtype = torch.float16 if fp16 else torch.float32
        self.latents = torch.randn(torch.Size(shape_latents), generator=None, device=device,
                                   dtype=dtype, layout=torch.strided)
        self.noise = torch.randn(torch.Size(shape_noise), generator=None, device=device,
                                 dtype=dtype, layout=torch.strided)

        self.model = model
        self.processor = processor
        self.device = device
        self.fp16 = fp16

    def __len__(self):
        # Size of the image-token vocabulary (codebook size).
        return self.model.n_embed

    def load_img(self, image_path):
        """Load one image file and return its preprocessed tensor on self.device."""
        image_pil = Image.open(image_path).convert('RGB')
        return self.processor(image_pil).to(self.device)

    @fn_timer()
    def load_batch_img(self, image_paths):
        """Load a list of image files into a stacked [b, c, h, w] tensor."""
        return torch.stack([self.load_img(path) for path in image_paths], dim=0)

    def encode(self, image_torch):
        '''Convert a batch of images to codebook indices.

        Args:
            image_torch: a [b, c, h, w] or [c, h, w] tensor, a single image
                path, or a list/tuple of image paths.
        Returns:
            Tensor of shape [b, num_tokens] with codebook indices.
        '''
        if isinstance(image_torch, str):
            image_torch = self.load_img(image_torch)
        elif isinstance(image_torch, (list, tuple)):
            image_torch = self.load_batch_img(image_torch)

        if len(image_torch.shape) == 3:
            # Promote a single image to a batch of one.
            image_torch = image_torch.unsqueeze(0)

        img = image_torch
        if self.fp16:
            img = img.half()
        with torch.no_grad():
            indices, _ = self.model.get_codebook_indices(img)
        return indices.view(img.shape[0], -1)

    def encode_tokens(self, image_torch):
        '''Like `encode`, but returns modality token strings instead of ids.

        Returns one token sequence per image; unwrapped to a single item
        when the batch contains exactly one image.
        '''
        token_ids = self.encode(image_torch)
        tokens = [modality_ids_to_tokens(row, modality='image') for row in token_ids]
        return tokens[0] if len(tokens) == 1 else tokens

    def encode_img2text(self, image_torch):
        '''Like `encode`, but returns one serialized token string per image.

        Returns a list of strings; unwrapped to a single string when the
        batch contains exactly one image.
        '''
        token_ids = self.encode(image_torch)
        strs = [modality_tokens_ids_to_string(row, modality='image') for row in token_ids]
        return strs[0] if len(strs) == 1 else strs

    def decode(self, indices, negative_indices=None, guidance_scale=10, num_inference_steps=20):
        """Reconstruct images from codebook indices via the unCLIP pipeline.

        Args:
            indices: codebook indices as produced by `encode`.
            negative_indices: optional negative-guidance indices; must match
                the shape of `indices`.
            guidance_scale: classifier-free guidance strength.
            num_inference_steps: number of diffusion steps.
        Returns:
            The pipeline's `.images` output (list of PIL images).
        """
        image_embeds = self.model.get_codebook_entry(indices)
        if negative_indices is not None:
            assert indices.shape == negative_indices.shape, 'Negative indices must have the same shape with indices'
            negative_image_embeds = self.model.get_codebook_entry(negative_indices)
        else:
            negative_image_embeds = None

        image = self.token2img_model(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            guidance_scale=guidance_scale,
            noise_level=0,
            num_inference_steps=num_inference_steps,
            latents=self.latents,
        ).images
        return image


class SeedLlamaTokenizer(LlamaTokenizer):
    """LlamaTokenizer extended with an image tokenizer for SEED-style models.

    The underlying ImageTokenizerGeneral is built once (eagerly at the end
    of __init__) and cached on `_image_tokenizer`; `image_tokenizer` exposes
    it as a property.
    """

    def __init__(self,
                 vocab_file,
                 unk_token="<unk>",
                 bos_token="<s>",
                 eos_token="</s>",
                 pad_token=None,
                 sp_model_kwargs: Optional[Dict[str, Any]] = None,
                 add_bos_token=True,
                 add_eos_token=False,
                 clean_up_tokenization_spaces=False,
                 device='cuda',
                 fp16=True,
                 load_text2img_model=False,
                 img2token_model_path=None,
                 img2token_model_cls=None,
                 token2img_model_path=None,
                 token2img_model_cls=None,
                 **kwargs):
        super().__init__(vocab_file, unk_token, bos_token, eos_token, pad_token, sp_model_kwargs, add_bos_token, add_eos_token,
                         clean_up_tokenization_spaces, **kwargs)
        self.device = device
        self.fp16 = fp16
        # LLaMA has no pad token by default; reuse <unk> so batching works.
        self.pad_token = self.unk_token
        self.load_text2img_model = load_text2img_model
        self.img2token_model_path = img2token_model_path
        self.img2token_model_cls = img2token_model_cls
        self.token2img_model_path = token2img_model_path
        self.token2img_model_cls = token2img_model_cls

        self.load_image_tokenizer()

    def load_image_tokenizer(self):
        """Build and cache the image tokenizer; no-op when already cached.

        Raises:
            Exception: when `img2token_model_path` was not provided.
        """
        if not hasattr(self, '_image_tokenizer'):
            if self.img2token_model_path is None:
                raise Exception('img2token_model_path is None')
            self._image_tokenizer = ImageTokenizerGeneral(img2token_model_path=self.img2token_model_path,
                                                          img2token_model_cls_or_name=self.img2token_model_cls,
                                                          token2img_model_path=self.token2img_model_path,
                                                          token2img_model_cls_or_name=self.token2img_model_cls,
                                                          load_text2img_model=self.load_text2img_model,
                                                          device=self.device,
                                                          fp16=self.fp16)

    @property
    def image_tokenizer(self):
        # Delegates to load_image_tokenizer; the previous implementation
        # duplicated its body verbatim (with a divergent error message).
        self.load_image_tokenizer()
        return self._image_tokenizer

    @property
    def num_image_tokens(self):
        # Hard-coded so callers can query the image vocab size without
        # loading the tokenizer; presumably the SEED codebook size — confirm
        # against the checkpoint if it changes.
        return 8192

    def to(self, device):
        """Record the target device and move the cached image tokenizer to it."""
        self.device = device
        if hasattr(self, '_image_tokenizer'):
            self._image_tokenizer.to(device=device)

    def encode_image(
        self,
        image_path=None,
        image_pil=None,
        image_torch=None,
        image_size: int = 224,
    ):
        """Encode exactly one of (file path, PIL image, tensor) to token ids."""
        # Exactly one of the three inputs must be provided.
        assert (image_path is None) + (image_pil is None) + (image_torch is None) == 2

        if image_path is not None:
            image_pil = Image.open(image_path).convert('RGB')

        if image_pil is not None:
            image_torch = self.image_tokenizer.processor(image_pil)
            image_torch = image_torch.to(self.device)
        return self.image_tokenizer.encode(image_torch)

    def decode_image(self, indices, negative_indices=None, guidance_scale=10):
        """Decode token indices back to images via the diffusion pipeline."""
        indices = indices.to(self.device)
        if negative_indices is not None:
            negative_indices = negative_indices.to(self.device)
        image = self.image_tokenizer.decode(
            indices,
            negative_indices=negative_indices,
            guidance_scale=guidance_scale,
        )
        return image

# ==============================  dataset
from torch.utils.data import Dataset
from my_py_toolkit.file.file_toolkit import *
from PIL import Image
from torch.utils import data

class ImageDataset(Dataset):
    """Dataset yielding (image_path, preprocessed image tensor) pairs.

    NOTE(review): the real directory scan is commented out and replaced by a
    hard-coded debug file repeated 2000 times — restore the
    get_file_paths(...) line before production use.
    """

    def __init__(self, data_dir, processor, device):
        super().__init__()
        # self.files = get_file_paths(data_dir, ['jpg'])
        # debug: 2000 copies of one fixed image
        self.files = ['/home/centos/ll/code/test/1.jpg' for _ in range(2000)]
        self.device = device
        self.processor = processor

    def __getitem__(self, idx):
        image_path = self.files[idx]
        image_pil = Image.open(image_path).convert('RGB')
        # NOTE(review): moving tensors onto a CUDA device inside DataLoader
        # workers (num_workers > 0) is fragile — consider moving to the
        # device in the main process instead.
        image_torch = self.processor(image_pil).to(self.device)
        return image_path, image_torch

    def __len__(self):
        return len(self.files)

def test_img2token():
    """Smoke test: encode a duplicated local image and print the token strings."""
    seed_ckpt = '/home/centos/.cache/huggingface/hub/models--AILab-CVC--seed-tokenizer-2/snapshots/c6d957a9b280d9ed3879b1eb5d99036cf8390012/seed_quantizer.pt'
    unclip_dir = '/home/centos/.cache/huggingface/hub/models--stabilityai--stable-diffusion-2-1-unclip/snapshots/e99f66a92bdcd1b0fb0d4b6a9b81b3b37d8bea44'
    paths = ['/home/centos/ll/code/test/1.jpg'] * 2
    tokenizer = ImageTokenizerGeneral(seed_ckpt, 'seed', unclip_dir, 'stable_unclip')
    print(tokenizer.encode_img2text(paths))

@fn_timer()
def cvt2token(tokenizer, img_path):
    """Timed wrapper: encode image path(s) to token strings."""
    return tokenizer.encode_img2text(img_path)

def test_img2token_speed():
    """Measure encode throughput at batch sizes 1, 100, 200, ..., 1000.

    cvt2token is @fn_timer-decorated, so each call prints its wall time.
    """
    seed_ckpt = '/home/centos/.cache/huggingface/hub/models--AILab-CVC--seed-tokenizer-2/snapshots/c6d957a9b280d9ed3879b1eb5d99036cf8390012/seed_quantizer.pt'
    unclip_dir = '/home/centos/.cache/huggingface/hub/models--stabilityai--stable-diffusion-2-1-unclip/snapshots/e99f66a92bdcd1b0fb0d4b6a9b81b3b37d8bea44'
    img_path = '/home/centos/ll/code/test/1.jpg'
    tokenizer = ImageTokenizerGeneral(seed_ckpt, 'seed', unclip_dir, 'stable_unclip')
    for i in range(0, 1100, 100):
        print(i)
        # Same batch-size clamping idiom as test_img2token_speed_dataset,
        # replacing the previous in-place `if i == 0: i += 1` mutation.
        batch = max(1, i)
        cvt2token(tokenizer, [img_path] * batch)


@fn_timer()
def cvt_with_dataset(tokenizer, train_loader):
    """Encode the first batch from `train_loader` and return its token strings.

    NOTE(review): returns after the first batch — appears intentional so the
    fn_timer wrapper times a single batch per configuration; confirm.
    Returns None when the loader is empty.
    """
    for _, batch_images in train_loader:
        return tokenizer.encode_img2text(batch_images)

def test_img2token_speed_dataset():
    """Measure encode throughput through a DataLoader at growing batch sizes.

    NOTE(review): DistributedSampler requires torch.distributed to be
    initialized; this only runs inside a distributed launch.
    """
    seed_ckpt = '/home/centos/.cache/huggingface/hub/models--AILab-CVC--seed-tokenizer-2/snapshots/c6d957a9b280d9ed3879b1eb5d99036cf8390012/seed_quantizer.pt'
    unclip_dir = '/home/centos/.cache/huggingface/hub/models--stabilityai--stable-diffusion-2-1-unclip/snapshots/e99f66a92bdcd1b0fb0d4b6a9b81b3b37d8bea44'
    data_dir = ''
    device = 'cuda'
    num_workers = 4
    tokenizer = ImageTokenizerGeneral(seed_ckpt, 'seed', unclip_dir, 'stable_unclip')

    train_set = ImageDataset(data_dir, tokenizer.processor, device)
    train_sampler = data.distributed.DistributedSampler(train_set)

    for i in range(0, 1100, 100):
        bs = max(1, i)
        train_loader = data.DataLoader(train_set, shuffle=False,
                                       sampler=train_sampler,
                                       num_workers=num_workers,
                                       batch_size=bs)
        cvt_with_dataset(tokenizer, train_loader)



        # print(img_str)


def get_tokenizer():
    """Build an ImageTokenizerGeneral from the hard-coded local checkpoints."""
    seed_ckpt = '/home/centos/.cache/huggingface/hub/models--AILab-CVC--seed-tokenizer-2/snapshots/c6d957a9b280d9ed3879b1eb5d99036cf8390012/seed_quantizer.pt'
    unclip_dir = '/home/centos/.cache/huggingface/hub/models--stabilityai--stable-diffusion-2-1-unclip/snapshots/e99f66a92bdcd1b0fb0d4b6a9b81b3b37d8bea44'
    # Unused locals (img_path, data_dir, device, num_workers) removed.
    return ImageTokenizerGeneral(seed_ckpt, 'seed', unclip_dir, 'stable_unclip')

def test_dataset():
    """Iterate the (debug) dataset through a DataLoader and print each batch."""
    tokenizer = get_tokenizer()
    train_set = ImageDataset('', tokenizer.processor, 'cuda')
    train_loader = data.DataLoader(train_set, shuffle=False,
                                   sampler=None,
                                   num_workers=4,
                                   batch_size=2)

    for item in train_loader:
        print(item)


if __name__ == '__main__':
    # Manual smoke test: stream the debug dataset through a DataLoader.
    # test_img2token_speed_dataset()
    test_dataset()