# Prediction interface for Cog ⚙️
# Reference: https://github.com/replicate/cog/blob/main/docs/python.md

import clip
import os
from tqdm import tqdm
from torch import nn
import numpy as np
import torch
import torch.nn.functional as nnf
import sys
from typing import Tuple, List, Union, Optional
from transformers import (
    GPT2Tokenizer,
    GPT2LMHeadModel,
    AdamW,
    get_linear_schedule_with_warmup,
)
import skimage.io as io
import PIL.Image
import pickle

from self_model import *
from torchinfo import summary
import matplotlib.pyplot as plt
from PIL import Image
import textwrap # automatic line wrapping for captions drawn on figures

# Root of the preprocessed COCO data (pickled CLIP embeddings, id index, images).
data_dir = './data/coco'
# Directory where result figures are saved (see my_Predictor.similar_fig).
export_dir = './experiment'

# import cog

# # import torch

# N = type(None)
# V = np.array
# ARRAY = np.ndarray
# ARRAYS = Union[Tuple[ARRAY, ...], List[ARRAY]]
# VS = Union[Tuple[V, ...], List[V]]
# VN = Union[V, N]
# VNS = Union[VS, N]
# T = torch.Tensor
# TS = Union[Tuple[T, ...], List[T]]
# TN = Optional[T]
# TNS = Union[Tuple[TN, ...], List[TN]]
# TSN = Optional[TS]
# TA = Union[T, ARRAY]

# WEIGHTS_PATHS = {
#     "coco": "coco_weights.pt",
#     "conceptual-captions": "conceptual_weights.pt",
# }

# D = torch.device
# CPU = torch.device("cpu")


# class Predictor(cog.Predictor):
#     def setup(self):
#         """Load the model into memory to make running multiple predictions efficient"""
#         self.device = torch.device("cuda")
#         self.clip_model, self.preprocess = clip.load(
#             "ViT-B/32", device=self.device, jit=False
#         )
#         self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

#         self.models = {}
#         self.prefix_length = 10
#         for key, weights_path in WEIGHTS_PATHS.items():
#             model = ClipCaptionModel(self.prefix_length)
#             model.load_state_dict(torch.load(weights_path, map_location=CPU))
#             model = model.eval()
#             model = model.to(self.device)
#             self.models[key] = model

#     @cog.input("image", type=cog.Path, help="Input image")
#     @cog.input(
#         "model",
#         type=str,
#         options=WEIGHTS_PATHS.keys(),
#         default="coco",
#         help="Model to use",
#     )
#     @cog.input(
#         "use_beam_search",
#         type=bool,
#         default=False,
#         help="Whether to apply beam search to generate the output text",
#     )
#     def predict(self, image, model, use_beam_search):
#         """Run a single prediction on the model"""
#         image = io.imread(image)
#         model = self.models[model]
#         pil_image = PIL.Image.fromarray(image)
#         image = self.preprocess(pil_image).unsqueeze(0).to(self.device)
#         with torch.no_grad():
#             prefix = self.clip_model.encode_image(image).to(
#                 self.device, dtype=torch.float32
#             )
#             prefix_embed = model.clip_project(prefix).reshape(1, self.prefix_length, -1)
#         if use_beam_search:
#             return generate_beam(model, self.tokenizer, embed=prefix_embed)[0]
#         else:
#             return generate2(model, self.tokenizer, embed=prefix_embed)


# class MLP(nn.Module):
#     def forward(self, x: T) -> T:
#         return self.model(x)

#     def __init__(self, sizes: Tuple[int, ...], bias=True, act=nn.Tanh):
#         super(MLP, self).__init__()
#         layers = []
#         for i in range(len(sizes) - 1):
#             layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=bias))
#             if i < len(sizes) - 2:
#                 layers.append(act())
#         self.model = nn.Sequential(*layers)


# class ClipCaptionModel(nn.Module):

#     # @functools.lru_cache #FIXME
#     def get_dummy_token(self, batch_size: int, device: D) -> T:
#         return torch.zeros(
#             batch_size, self.prefix_length, dtype=torch.int64, device=device
#         )

#     def forward(
#         self, tokens: T, prefix: T, mask: Optional[T] = None, labels: Optional[T] = None
#     ):
#         embedding_text = self.gpt.transformer.wte(tokens)
#         prefix_projections = self.clip_project(prefix).view(
#             -1, self.prefix_length, self.gpt_embedding_size
#         )
#         # print(embedding_text.size()) #torch.Size([5, 67, 768])
#         # print(prefix_projections.size()) #torch.Size([5, 1, 768])
#         embedding_cat = torch.cat((prefix_projections, embedding_text), dim=1)
#         if labels is not None:
#             dummy_token = self.get_dummy_token(tokens.shape[0], tokens.device)
#             labels = torch.cat((dummy_token, tokens), dim=1)
#         out = self.gpt(inputs_embeds=embedding_cat, labels=labels, attention_mask=mask)
#         return out

#     def __init__(self, prefix_length: int, prefix_size: int = 512):
#         super(ClipCaptionModel, self).__init__()
#         self.prefix_length = prefix_length
#         self.gpt = GPT2LMHeadModel.from_pretrained("gpt2")
#         self.gpt_embedding_size = self.gpt.transformer.wte.weight.shape[1]
#         if prefix_length > 10:  # not enough memory
#             self.clip_project = nn.Linear(
#                 prefix_size, self.gpt_embedding_size * prefix_length
#             )
#         else:
#             self.clip_project = MLP(
#                 (
#                     prefix_size,
#                     (self.gpt_embedding_size * prefix_length) // 2,
#                     self.gpt_embedding_size * prefix_length,
#                 )
#             )


# class ClipCaptionPrefix(ClipCaptionModel):
#     def parameters(self, recurse: bool = True):
#         return self.clip_project.parameters()

#     def train(self, mode: bool = True):
#         super(ClipCaptionPrefix, self).train(mode)
#         self.gpt.eval()
#         return self


def generate_beam(
    model,
    tokenizer,
    beam_size: int = 5,
    prompt=None,
    embed=None,
    entry_length=67,
    temperature=1.0,
    stop_token: str = ".",
):
    """Generate captions for one image prefix via beam search.

    Args:
        model: ClipCaption-style model exposing ``gpt`` (GPT-2 LM head model).
        tokenizer: GPT-2 style tokenizer with ``encode``/``decode``.
        beam_size: number of beams kept at each step.
        prompt: text prompt used to seed generation when ``embed`` is None.
        embed: optional prefix embedding, assumed [1, prefix_len, dim] — TODO confirm.
        entry_length: maximum number of generated tokens.
        temperature: softmax temperature; values <= 0 fall back to 1.0.
        stop_token: string whose first encoded token id terminates a beam.

    Returns:
        List of ``beam_size`` decoded captions, sorted by descending score.
    """
    model.eval()
    # Only the first token id of the stop string is used as the terminator.
    stop_token_index = tokenizer.encode(stop_token)[0]
    tokens = None
    scores = None
    device = next(model.parameters()).device
    seq_lengths = torch.ones(beam_size, device=device)
    is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
    with torch.no_grad():
        if embed is not None:
            generated = embed
        else:
            if tokens is None:
                tokens = torch.tensor(tokenizer.encode(prompt))
                tokens = tokens.unsqueeze(0).to(device)
                generated = model.gpt.transformer.wte(tokens)
        for i in range(entry_length):
            outputs = model.gpt(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            # Work in log-probabilities so beam scores can be summed.
            logits = logits.softmax(-1).log()
            if scores is None:
                # First step: pick the top beam_size tokens. `logits` is 2-D
                # here because of the [:, -1, :] slice above.
                scores, next_tokens = logits.topk(beam_size, -1)
                # Expand the single prefix into one copy per beam.
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
            else:
                # A finished beam may only "extend" with token 0 at zero cost,
                # so its score is frozen.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                # Length-normalize before ranking so longer beams aren't penalized.
                scores_sum_average = scores_sum / seq_lengths[:, None]
                # Best beam/token pairs over the flattened [beam, vocab] matrix.
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(
                    beam_size, -1
                )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)  # [beam_size, i + 1]
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = model.gpt.transformer.wte(next_tokens.squeeze()).view(
                generated.shape[0], 1, -1
            )
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
    scores = scores / seq_lengths
    output_list = tokens.cpu().numpy()
    output_texts = [
        tokenizer.decode(output[: int(length)])
        for output, length in zip(output_list, seq_lengths)
    ]
    order = scores.argsort(descending=True)
    output_texts = [output_texts[i] for i in order]
    return output_texts


# def generate2(
#     model,
#     tokenizer,
#     tokens=None,
#     prompt=None,
#     embed=None,
#     entry_count=1,
#     entry_length=67,  # maximum number of words
#     top_p=0.8,
#     temperature=1.0,
#     stop_token: str = ".",
# ):
#     model.eval()
#     generated_num = 0
#     generated_list = []
#     stop_token_index = tokenizer.encode(stop_token)[0]
#     filter_value = -float("Inf")
#     device = next(model.parameters()).device

#     with torch.no_grad():

#         for entry_idx in range(entry_count):
#             if embed is not None:
#                 generated = embed
#             else:
#                 if tokens is None:
#                     tokens = torch.tensor(tokenizer.encode(prompt))
#                     tokens = tokens.unsqueeze(0).to(device)

#                 generated = model.gpt.transformer.wte(tokens)

#             for i in range(entry_length):

#                 outputs = model.gpt(inputs_embeds=generated)
#                 logits = outputs.logits
#                 logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
#                 sorted_logits, sorted_indices = torch.sort(logits, descending=True)
#                 cumulative_probs = torch.cumsum(    # 累加函数，就是求前缀和
#                     nnf.softmax(sorted_logits, dim=-1), dim=-1
#                 )
#                 sorted_indices_to_remove = cumulative_probs > top_p
#                 sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[
#                     ..., :-1
#                 ].clone()
#                 sorted_indices_to_remove[..., 0] = 0

#                 indices_to_remove = sorted_indices[sorted_indices_to_remove]
#                 logits[:, indices_to_remove] = filter_value
#                 next_token = torch.argmax(logits, -1).unsqueeze(0)
#                 next_token_embed = model.gpt.transformer.wte(next_token)
#                 if tokens is None:
#                     tokens = next_token
#                 else:
#                     tokens = torch.cat((tokens, next_token), dim=1)
#                 generated = torch.cat((generated, next_token_embed), dim=1)
#                 if stop_token_index == next_token.item():
#                     break

#             output_list = list(tokens.squeeze().cpu().numpy())
#             output_text = tokenizer.decode(output_list)
#             generated_list.append(output_text)

#     return generated_list[0]


def load_model(model, weight_path, device='cpu'):
    """Load trained weights from ``weight_path`` into ``model``.

    Args:
        model: a ``torch.nn.Module`` whose state dict matches the checkpoint.
        weight_path: path to a ``torch.save``-d state dict file.
        device: ``map_location`` passed to ``torch.load``.

    Returns:
        The same ``model`` instance with the weights loaded.

    Raises:
        FileNotFoundError: if ``weight_path`` is not an existing file.
    """
    if not os.path.isfile(weight_path):
        # Include the offending path so the failure is actionable.
        raise FileNotFoundError(f"weight path is wrong: {weight_path!r}")
    model.load_state_dict(torch.load(weight_path, map_location=device))
    return model


class my_Predictor():
    """Interactive COCO captioning demo.

    Asks the user for an image, produces a beam-search caption with a
    ClipCaption-style model, and renders the 5 nearest training images
    (by CLIP-embedding distance) next to the query image.
    """

    def __init__(self, model, prefix_len=10, clip_model_type: Optional[str] = 'ViT-B/32', device = 'cpu'):
        # model: ClipCaption-style module exposing `clip_project` and `gpt`.
        self.model = model
        self.device = device
        self.prefix_len = prefix_len
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        self.clip_model, self.preprocess = clip.load(clip_model_type, device=self.device, jit=False)
        # Precomputed CLIP embeddings for the training split.
        with open(os.path.join(data_dir, 'oscar_split_ViT-B-32_train.pkl'), 'rb') as f:
            self.all_embeds = pickle.load(f)['clip_embedding']
        # image_id -> list of {'embed_id', 'caption'} records.
        with open(os.path.join(data_dir, 'Image_id_index_data.pkl'), 'rb') as f:
            self.all_index = pickle.load(f)

    def predict(self):
        """Read an image id (or path) from stdin, caption it, plot neighbors.

        Returns:
            The list of beam-search captions, best first.
        """
        image_id = input("请输入图片编号: ")
        no_id = None
        if image_id in self.all_index.keys():
            # Known COCO image: reuse its precomputed CLIP embedding.
            embed = self.all_embeds[self.all_index[image_id][0]['embed_id']].unsqueeze(0).to(self.device)
            image_path = f'./data/coco/train2014/COCO_train2014_{int(image_id):012d}.jpg'
            if not os.path.isfile(image_path):
                image_path = f'./data/coco/val2014/COCO_val2014_{int(image_id):012d}.jpg'
            # Exclude the query image itself from the similarity search.
            no_id = image_id
            tokens = []
            for it in self.all_index[image_id]:
                tokens.append(it['caption'])
        else:
            # Unknown id: fall back to a user-supplied file and embed it with CLIP.
            image_path = input("图片编号不存在, 请输入图片路径: ")
            image = io.imread(image_path)
            image = self.preprocess(PIL.Image.fromarray(image)).unsqueeze(0).to(self.device)
            embed = self.clip_model.encode_image(image)

        # Map the CLIP embedding to a GPT-2 prefix and decode a caption.
        embed = embed.float()
        input_embed = self.model.clip_project(embed).view(1, self.prefix_len, -1)
        out_caption = self.generate_beam2(input_embed)
        print(out_caption)

        # Show the most similar dataset images alongside the prediction.
        self.similar_fig(embed, image_path, out_caption[0], no_id)
        return out_caption

    def generate_beam2(self, input_embed, beam_size=5, max_seq_len=67, stop_word = '.'):
        """Beam-search decode a caption from a GPT-2 prefix embedding.

        Args:
            input_embed: prefix embedding, assumed [1, prefix_len, dim] — TODO confirm.
            beam_size: number of beams kept per step.
            max_seq_len: maximum number of generated tokens.
            stop_word: string whose first token id terminates a beam.

        Returns:
            ``beam_size`` decoded captions sorted by descending score.
        """
        stop_token = self.tokenizer.encode(stop_word)[0]
        split_token = self.tokenizer.encode(' ')[0]
        seq_len = torch.ones(beam_size, device=self.device)
        is_stop = torch.zeros(beam_size, device=self.device, dtype=torch.bool)
        print(">>>Predicting!")
        tokens = None
        scores = None
        with torch.no_grad():
            for i in tqdm(range(max_seq_len)):
                # BUG FIX: the original read the module-level global `model`;
                # use the model this predictor was constructed with.
                out = self.model.gpt(inputs_embeds=input_embed)
                logits = out.logits[:, -1, :]
                # Work in log-probabilities so beam scores can be summed.
                logits = logits.softmax(-1).log()
                if scores is None:
                    # First step: expand the single prefix to one copy per beam.
                    input_embed = input_embed.expand(beam_size, *input_embed.shape[1:])
                    scores, next_token = logits.topk(beam_size, -1)  # [1, beam_size]
                    scores, next_token = scores.squeeze(0), next_token.permute(1, 0)
                    tokens = next_token
                else:
                    # A finished beam may only "extend" with token 0 at zero cost.
                    logits[is_stop] = -float(np.inf)
                    logits[is_stop, 0] = 0
                    sum_scores = scores[:, None] + logits   # [beam_size, vocab_size]
                    seq_len[~is_stop] += 1
                    average_scores = sum_scores / seq_len[:, None]

                    # Best beam/token pairs over the flattened score matrix.
                    average_scores, next_token_total = average_scores.view(-1).topk(beam_size, -1)

                    next_token_source = next_token_total // logits.shape[1]
                    next_token = next_token_total % logits.shape[1]
                    next_token = next_token.unsqueeze(1)
                    input_embed = input_embed[next_token_source]
                    tokens = torch.cat([tokens[next_token_source], next_token], dim=1)  # [beam_size, i + 1]
                    seq_len = seq_len[next_token_source]
                    is_stop = is_stop[next_token_source]
                    scores = average_scores * seq_len

                if i == 0:
                    is_stop = is_stop + next_token.squeeze(1).eq(stop_token)  # next_token [beam_size]
                else:
                    # NOTE(review): tokens[:, -1] already equals next_token at this
                    # point, so the second term effectively stops on any space
                    # token — probably meant to compare the *previous* token; confirm.
                    is_stop = is_stop + next_token.squeeze(1).eq(stop_token) + (tokens[:, -1].eq(split_token) & next_token.squeeze(1).eq(split_token))
                next_token_embed = self.model.gpt.transformer.wte(next_token.squeeze(1)).view(
                    beam_size, 1, -1
                )
                input_embed = torch.cat([input_embed, next_token_embed], dim=1)

                if all(is_stop):
                    break
            scores = scores / seq_len
            tokens = tokens.cpu()
            out = [self.tokenizer.decode(output[:int(seq_len_i)]) for output, seq_len_i in zip(tokens, seq_len)]
            index = scores.argsort(descending=True)
        # FIX: build the ordered list once (the original computed it twice).
        return [out[i] for i in index]

    def similar_fig(self, embed, image_path, out_caption, no_id=None):
        """Plot the query image next to its 5 nearest dataset images.

        Args:
            embed: CLIP embedding of the query image.
            image_path: path of the query image (first panel).
            out_caption: generated caption shown on the query panel.
            no_id: image id to exclude from the search (the query itself).
        """
        dists = []
        image_list = []
        for image_id, cap_list in self.all_index.items():
            if image_id == no_id:
                continue
            dists.append(torch.norm(embed - self.all_embeds[cap_list[0]['embed_id']], 2))
            image_list.append(image_id)
        # torch.stack avoids the slow tensor-of-tensors copy; topk needs float32.
        dists = torch.stack(dists).float()
        family_dist, index = dists.topk(5, largest=False)
        similar_image = [image_list[i] for i in index]
        file_names = [image_path]
        descriptions = [out_caption]
        # (renamed loop variable from `id` to avoid shadowing the builtin)
        for img_id in similar_image:
            descriptions.append(self.all_index[img_id][0]['caption'])
            file_name = f'./data/coco/train2014/COCO_train2014_{int(img_id):012d}.jpg'
            if not os.path.isfile(file_name):
                file_name = f'./data/coco/val2014/COCO_val2014_{int(img_id):012d}.jpg'
            file_names.append(file_name)

        # One row of panels: the query image first (with the generated caption),
        # then the five most similar training images with their dataset captions.
        fig, axs = plt.subplots(1, len(file_names), figsize=(15, 10))
        for ax, file_name, description in zip(axs, file_names, descriptions):
            image = Image.open(file_name)

            ax.imshow(image)
            # Wrap long captions and draw them in a semi-transparent text box.
            description = textwrap.fill(description, width=20)
            ax.annotate(
                description,  # caption text
                xy=(0.5, 2),  # position in axes-fraction coordinates
                xycoords='axes fraction',
                fontsize=12, color='red',
                ha='center', va='top',
                bbox=dict(facecolor='black', alpha=0.5, edgecolor='none')
            )
            ax.axis('off')
        plt.savefig(os.path.join(export_dir, image_path.split('/')[-1][:-4] + '-ans.png'))
        plt.show()



if __name__ == '__main__':
    # Build the caption model with prefix length 10 (ClipCaptionModel is
    # star-imported from self_model above).
    model = ClipCaptionModel(10)    # ~0.19B parameters in total
    # for parameters in model.gpt.parameters():
    #     parameters.requires_grad = False
    # summary(model)
    # exit(0)
    device = 'cpu'
    # NOTE(review): keep `model` at module scope — my_Predictor.generate_beam2
    # may read the global `model` rather than self.model; confirm before
    # wrapping this block in a function.
    model = load_model(model, weight_path='./data/coco/coco_prefix-001.pt', device=device).to(device)
    model = model.eval()
    predictor = my_Predictor(model, 10, device=device)
    predictor.predict()

    # One-off script (kept for reference) that built Image_id_index_data.pkl,
    # mapping image_id -> [{'embed_id', 'caption'}, ...].
    # id_index_data = {}
    # with open('./data/coco/oscar_split_ViT-B-32_train.pkl', 'rb') as f:
    #     all_data = pickle.load(f)
    
    # out_path = './data/coco/Image_id_index_data.pkl'
    # all_embeds = all_data['clip_embedding'] #tensor
    # all_caption = all_data['captions']  # list
    # for i in tqdm(range(len(all_caption))):
    #     one_caption = all_caption[i]
    #     image_id = one_caption['image_id']
    #     one_index_data = {'embed_id':one_caption['embed_id'], 'caption':one_caption['caption']}
    #     if image_id in id_index_data:
    #         id_index_data[image_id].append(one_index_data)
    #     else:
    #         id_index_data[image_id] = [one_index_data]
    #     if i % 10000 == 0:
    #         with open(out_path, 'wb') as f:
    #             pickle.dump(id_index_data, f)
    # with open(out_path, 'wb') as f:
    #     pickle.dump(id_index_data, f)