import json
import os
from pathlib import Path

from torchvision.transforms import Compose, ToTensor, Normalize, Resize, InterpolationMode
import torch
import torch.nn.functional as F
import torch.nn as nn
from cn_clip.clip.model import convert_weights
from cn_clip.clip import _tokenizer, tokenize

from .clip import CLIP
from utils.measure_data_distribution import measure


class LinearProbe(nn.Module):
    """Single linear layer followed by a softmax over the class dimension."""

    def __init__(self, config):
        """Build the probe.

        Args:
            config: dict providing "input_size" (fused feature dimension)
                and "num_classes" (number of output classes).
        """
        super().__init__()
        self.fc1 = nn.Linear(config["input_size"], config["num_classes"])

    def forward(self, x):
        """Map (batch, input_size) features to (batch, num_classes) probabilities.

        NOTE(review): output is already softmax-ed — if trained with
        nn.CrossEntropyLoss this would double-apply softmax; confirm the loss used.
        """
        logits = self.fc1(x)
        return F.softmax(logits, dim=1)


class TextCNN(nn.Module):
    """Multi-width convolution + global-max-pool feature extractor for text.

    Each Conv2d kernel spans the full embedding width, so every filter size
    yields one max-pooled feature vector; the results are concatenated.
    """

    def __init__(self, config):
        """Args:
            config: dict with 'out_channels', 'hidden_dim', 'filter_sizes'
                (iterable of kernel heights) and 'dropout_rate'.
        """
        super().__init__()
        # One convolution per filter width; kernel covers the whole hidden dim.
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config['out_channels'], (width, config['hidden_dim']))
             for width in config['filter_sizes']]
        )
        self.dropout = nn.Dropout(config['dropout_rate'])

    def conv_and_pool(self, x, conv):
        """Apply one convolution + ReLU, then max-pool over the time axis."""
        activated = F.relu(conv(x)).squeeze(3)                            # (B, C, T')
        pooled = F.max_pool1d(activated, activated.size(2)).squeeze(2)    # (B, C)
        return pooled

    def forward(self, x):
        """(B, T, H) token embeddings -> (B, out_channels * n_filters) features."""
        channel_in = x.unsqueeze(1)  # add the Conv2d channel axis
        features = [self.conv_and_pool(channel_in, conv) for conv in self.convs]
        return self.dropout(torch.cat(features, 1))

class BERT_TextCNN(nn.Module):
    """Text branch: CLIP's (frozen) BERT feeding a trainable TextCNN head.

    Returns both the TextCNN feature over selected hidden layers and the
    projected, logit-scale-scaled [CLS] feature from the last layer.
    """

    def __init__(self, CLIP, config):
        super().__init__()
        # dtype of CLIP's visual stem, reused when building the attention mask.
        self.dtype = CLIP.visual.conv1.weight.dtype
        self.tokenizer = _tokenizer
        self.BERT = CLIP.bert
        self.BERT_cls_projection = CLIP.text_projection
        # NOTE: exp() is taken once at construction time; later changes to
        # CLIP.logit_scale would not propagate (CLIP is frozen, so this is OK).
        self.CLIP_logit_scale = CLIP.logit_scale.exp()

        self.textCNN = TextCNN(config['TextCNN'])
        self.BERT_config = config['BERT']

    def forward(self, text):
        # Mask out [PAD] positions for BERT's attention (CLIP's BERT is frozen).
        pad_id = self.tokenizer.vocab['[PAD]']
        mask = text.ne(pad_id).type(self.dtype)

        # Index 0: last hidden layer; index 2: hidden states of all layers.
        bert_out = self.BERT(text, attention_mask=mask)
        final_hidden = bert_out[0]
        per_layer_hidden = bert_out[2]

        # Collect the non-[CLS] token embeddings from each configured layer,
        # L2-normalised along dim=1.
        # NOTE(review): norm(dim=1) normalises across the token axis rather
        # than the feature axis — confirm this is intended (vs. dim=-1).
        layer_feats = []
        for idx in self.BERT_config['extract_layer']:
            tokens = per_layer_hidden[idx][:, 1:, :]
            layer_feats.append(tokens / tokens.norm(dim=1, keepdim=True))

        # Concatenate the selected layers along the token axis for the TextCNN.
        cnn_feature = self.textCNN(torch.cat(layer_feats, dim=1))

        # Project the last-layer [CLS] token and scale it by CLIP's logit scale.
        cls_feature = final_hidden[:, 0, :] @ self.BERT_cls_projection
        cls_feature = self.CLIP_logit_scale * cls_feature / cls_feature.norm(dim=1, keepdim=True)

        return cnn_feature, cls_feature


class Classifier(nn.Module):
    """End-to-end classifier: frozen CLIP encoders + TextCNN head + linear probe.

    The CLIP image feature, the projected [CLS] text feature, and the TextCNN
    feature are concatenated and classified by a linear probe.
    """

    def __init__(self, config):
        super(Classifier, self).__init__()
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.tokenizer = tokenize

        self.CLIP, self.img_processor = self.load_model_from_checkpoint(config)
        self.freeze_clip()  # CLIP stays fixed; only the heads are trainable

        self.BERT_TextCNN = BERT_TextCNN(self.CLIP, config)

        self.line_probe = LinearProbe(config)

    def load_model_from_checkpoint(self, config):
        """Build CLIP from its per-tower JSON configs and load a checkpoint.

        Returns:
            (model, transform): the CLIP model and its image preprocessing.

        Raises:
            FileNotFoundError: if either model config JSON is missing.
        """
        # Load the saved checkpoint onto CPU first.
        with open(config["checkpoint_path"], 'rb') as opened_file:
            checkpoint = torch.load(opened_file, map_location="cpu")
        vision_model, text_model = config["model_name"]["vision"], config["model_name"]["text"]

        # Initialize the model from the configs shipped next to this module.
        # `assert os.path.exists(...)` was replaced with explicit raises:
        # asserts are silently stripped under `python -O`.
        vision_model_config_file = Path(
            __file__).parent / f"model_configs/{vision_model.replace('/', '-')}.json"
        print('Loading vision model model_configs from', vision_model_config_file)
        if not vision_model_config_file.exists():
            raise FileNotFoundError(f"Missing vision model config: {vision_model_config_file}")

        text_model_config_file = Path(
            __file__).parent / f"model_configs/{text_model.replace('/', '-')}.json"
        print('Loading text model model_configs from', text_model_config_file)
        if not text_model_config_file.exists():
            raise FileNotFoundError(f"Missing text model config: {text_model_config_file}")

        # Merge the two configs; text keys override vision keys on collision.
        with open(vision_model_config_file, 'r') as fv, open(text_model_config_file, 'r') as ft:
            model_info = json.load(fv)
            for k, v in json.load(ft).items():
                model_info[k] = v
        if isinstance(model_info['vision_layers'], str):
            # SECURITY: eval() on config content — acceptable only because the
            # bundled model_configs JSONs are trusted; prefer ast.literal_eval.
            model_info['vision_layers'] = eval(model_info['vision_layers'])
        print('Model info', model_info)

        model = CLIP(config, **model_info)
        convert_weights(model)

        if checkpoint:
            sd = checkpoint["state_dict"]
            # Strip the DataParallel 'module.' prefix and drop the unused
            # bert.pooler weights before loading.
            if next(iter(sd.items()))[0].startswith('module'):
                sd = {k[len('module.'):]: v for k, v in sd.items() if "bert.pooler" not in k}
            model.load_state_dict(sd)
        return model, image_transform(model_info["image_resolution"])

    def freeze_clip(self):
        """Disable gradients for every CLIP parameter."""
        for param in self.CLIP.parameters():
            param.requires_grad = False

    def forward(self, img, text):
        """Classify an (image, raw-text) pair; returns class probabilities."""
        logit_scale = self.CLIP.logit_scale.exp()
        text = self.tokenizer(text).to(self.device)
        img = img.to(self.device)

        # Image feature, L2-normalised and scaled by the logit scale.
        f_img = self.CLIP.encode_image(img)
        f_img = logit_scale * f_img / f_img.norm(dim=1, keepdim=True)

        # Text features: TextCNN feature and projected [CLS] feature.
        f_text, cls = self.BERT_TextCNN(text)

        # Fuse all three representations and classify.
        f_fuse = torch.cat((f_img, cls, f_text), dim=1)
        predictions = self.line_probe(f_fuse)
        return predictions


def is_residual_attention_block(module):
    """Return True iff `module` structurally matches a residual attention block.

    Heuristic: exactly four direct children, in order MultiheadAttention,
    LayerNorm, Sequential (the MLP), LayerNorm. Anything else — including
    non-Module inputs — yields False.
    """
    if not isinstance(module, nn.Module):
        return False
    children = [child for _, child in module.named_children()]
    if len(children) != 4:
        return False
    expected = (nn.MultiheadAttention, nn.LayerNorm, nn.Sequential, nn.LayerNorm)
    return all(isinstance(child, cls) for child, cls in zip(children, expected))

def replace_dropout_layer(model, dropout_rate):
    """Recursively set every nn.Dropout probability to dropout_rate["text"].

    Mutates `model` in place. Dropout leaves are updated directly; all other
    children are descended into.
    """
    for _, submodule in model.named_children():
        if isinstance(submodule, nn.Dropout):
            submodule.p = dropout_rate["text"]
        else:
            replace_dropout_layer(submodule, dropout_rate)

def image_transform(image_size=224):
    """Return the CLIP image preprocessing pipeline for square `image_size` inputs."""
    # CLIP's published per-channel normalisation statistics.
    clip_mean = (0.48145466, 0.4578275, 0.40821073)
    clip_std = (0.26862954, 0.26130258, 0.27577711)
    return Compose([
        Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
        _convert_to_rgb,
        ToTensor(),
        Normalize(clip_mean, clip_std),
    ])

def _convert_to_rgb(image):
    return image.convert('RGB')
