import numpy as np
import torch
import torchvision
from pyexpat import features
from torch import nn
from torch.backends import cudnn
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from transformers import BertModel, BertConfig

from loss import ContrastiveLoss
from model.GPO import GPO
from model.MLP import MLP
from model.newPool import myModule
from utils import LoggerSingleton

# Local path to the pretrained BERT checkpoint used by EncoderText.
bert_model_path = "./model_param/bert"
model_config = BertConfig.from_pretrained(bert_model_path)  # load the BERT configuration file


def l2norm(X, dim=-1, eps=1e-8):
    """Scale X to unit L2 norm along `dim`; `eps` is added to the norm for stability."""
    denom = X.pow(2).sum(dim=dim, keepdim=True).sqrt() + eps
    return X / denom


def get_sim(images, captions):
    """Pairwise similarity matrix: row i, column j is the dot product of image i and caption j."""
    return images @ captions.t()


class MyModule:
    """Top-level trainer bundling the image/text encoders, contrastive loss and optimizer.

    NOTE(review): this is a plain Python object, not an nn.Module; train()/eval()
    switching and device placement are delegated manually to the sub-modules.
    """

    def __init__(self, opts, logger, writer=None):
        super().__init__()
        self.eiter = 0  # global training-iteration counter (tensorboard step)
        self.is_train = False
        self.grad_clip = opts.grad_clip  # max gradient norm; values <= 0 disable clipping
        self.opt = opts
        # logging / metric recording
        self.l = logger
        self.w = writer
        # image / text feature extractors
        self.img_enc = ImageEncoder(opts, logger=self.l)
        # self.img_enc = vit_base_patch16_224_in21k(1024)
        self.txt_enc = TextEncoder(opts, logger=self.l)
        # self.txt_enc = EncoderText(opts)

        # loss function
        self.criterion = ContrastiveLoss(opts, logger=logger)
        # move everything onto the GPU when one is available
        if torch.cuda.is_available():
            self.img_enc.cuda()
            self.txt_enc.cuda()
            self.criterion.cuda()
            cudnn.benchmark = True

        # collect all model parameters
        named_params = list(self.img_enc.named_parameters())
        named_params += list(self.txt_enc.named_parameters())

        # optionally freeze part of the parameters
        # for name, param in named_params:
        #     if 'bert' in name:
        #         param.requires_grad = False
        # parameters that take part in back-propagation
        self.valid_param = list(filter(lambda p: p[1].requires_grad, named_params))
        # optimizer: AdamW
        l1 = ['bert']

        def model_f(n, li):
            # True when any keyword in `li` occurs in the parameter name `n`
            return any(k in n for k in li)

        p = [v for k, v in self.valid_param]
        # bert_ = [v for k, v in self.valid_param if model_f(k, l1)]
        # no_bert_ = [v for k, v in self.valid_param if not model_f(k, l1)]
        # bert = [k for k, v in self.valid_param if model_f(k, l1)]
        # no_bert = [k for k, v in self.valid_param if not model_f(k, l1)]
        # print(bert)
        # print(no_bert)
        grouped_parameters = [
            # {'params': bert_, 'lr': opts.lr * 0.1},
            # {'params': no_bert_, 'lr': opts.lr},
            {'params': p, 'lr': opts.lr},
        ]

        # NOTE(review): decay_factor is never used — no weight decay is passed to AdamW
        decay_factor = 1e-4
        self.optimizer = torch.optim.AdamW(grouped_parameters, lr=opts.lr, )

    def set_lr(self, opts, num_epoch=None):
        """Placeholder for lr scheduling / backbone freezing; currently a no-op."""

        # for name, para in self.img_enc.named_parameters():
        #     # freeze all weights except head and pre_logits
        #     if "head" not in name and "pre_logits" not in name:
        #         para.requires_grad_(False)
        #     else:
        #         print("training {}".format(name))
        # for param in self.img_enc.parameters():
        #     param.requires_grad = False
        # for name, param in self.txt_enc.named_parameters():
        #     if 'bert' in name:
        #         param.requires_grad = False
        # self.l.info('Some backbone frozen.')
        # optimizer: adam
        pass

    # gpo_ = [v for k, v in self.valid_param if model_f(k, l1)]
    # no_gpo_ = [v for k, v in self.valid_param if not model_f(k, l1)]

    def set_max_violation(self, max_violation):
        # Toggle hardest-negative mining in the contrastive loss.
        if max_violation:
            self.criterion.max_violation_on()
        else:
            self.criterion.max_violation_off()

    def log_structure(self):
        """Log the string representation of every trainable component."""
        self.l.info(f'{self.img_enc}')
        self.l.info(f'{self.txt_enc}')
        self.l.info(f'{self.criterion}')
        self.l.info(f'{self.optimizer}')

    def state_dict(self):
        # Checkpoint format: [image-encoder state, text-encoder state].
        state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict()]
        return state_dict

    def load_state_dict(self, state_dict):
        # strict=False tolerates architecture drift between checkpoint and model.
        self.img_enc.load_state_dict(state_dict[0], strict=False)
        self.txt_enc.load_state_dict(state_dict[1], strict=False)

    def unfreeze_backbone(self, fixed_blocks):
        self.img_enc.unfreeze_backbone(fixed_blocks)
        # pass

    def train_start(self):
        """Switch both encoders to training mode."""
        self.img_enc.train()
        self.txt_enc.train()
        self.is_train = True

    def val_start(self):
        """Switch both encoders to evaluation mode."""
        self.img_enc.eval()
        self.txt_enc.eval()
        self.is_train = False

    # one training step
    def train_emb(self, images, captions, cap_len, img_len=None, warmup_alpha=None):
        """Run one forward/backward/update step and return the scalar loss."""
        self.eiter += 1
        # self.w.add_scalar('Eit', self.Eiters)
        self.w.add_scalar('lr', self.optimizer.param_groups[0]['lr'], self.eiter)
        # -------------------------------- standard pipeline ---------------------------------
        # forward pass: batched inputs -> similarity score matrix
        img_emb, cap_emb = self.forward_emb(images, captions, cap_len, img_len=img_len)
        sim_score = get_sim(img_emb, cap_emb)
        # zero gradients
        self.optimizer.zero_grad()
        # compute loss
        loss = self.criterion(sim_score)
        # back-propagate (optionally scaled by a warm-up factor)
        if warmup_alpha is not None:
            loss = loss * warmup_alpha
        loss.backward()
        # update parameters, clipping gradients first when enabled
        if self.grad_clip > 0:
            clip_grad_norm_([v for k, v in self.valid_param], self.grad_clip)
        self.optimizer.step()
        return loss.item()

    # Forward pass: maps raw batched data to the embeddings consumed by the loss;
    # this path must cover every trainable component.
    def forward_emb(self, images, captions, cap_len, img_len=None):
        """Compute image and caption embeddings."""
        img_emb = self.forward_img(images, img_len)
        cap_emb = self.forward_text(captions, cap_len)

        return img_emb, cap_emb

    def forward_img(self, img, img_len):
        # Move inputs to GPU when available, then encode.
        if torch.cuda.is_available():
            img = img.cuda()
            img_len = img_len.cuda()
        img_emb = self.img_enc(img, img_len)
        return img_emb

    def forward_text(self, captions, cap_len):
        # Captions go to GPU when available; lengths stay on CPU as a float tensor
        # (pack_padded_sequence accepts CPU lengths).
        if torch.cuda.is_available():
            captions = captions.cuda()
        cap_len = torch.Tensor(cap_len)
        cap_emb = self.txt_enc(captions, cap_len)
        return cap_emb

    # loss computation (unused helper kept for reference)
    # def forward_loss(self, img_emb, cap_emb):
    #     loss = self.criterion(img_emb, cap_emb)
    #     return loss


class ImageEncoder(nn.Module):
    """Project pre-extracted image region features into the joint embedding space.

    Pipeline: linear projection with an MLP residual branch, learned pooling over
    regions (`myModule`), then optional L2 normalization.
    """

    def __init__(self, opts, weight='xavier', logger=None, writer=None):
        """
        Args:
            opts: options namespace; uses opts.imgnorm, opts.embed_size, opts.img_dim.
            weight: 'xavier' or 'kaiming' initialization for the fc weight.
            logger: optional logger instance.
            writer: optional tensorboard writer.
        """
        super().__init__()
        self.l = logger
        self.w = writer
        self.imgnorm = opts.imgnorm  # whether to L2-normalize the pooled feature
        # NOTE(review): the ResNet backbone is currently disabled — forward()
        # expects pre-extracted region features, not raw images.
        # self.resnet_enc = ResnetFeatureExtractor(backbone_source='detector', weights_path=opts.backbone_path,
        #                                          logger=logger,
        #                                          fixed_blocks=2)  # ResNet101, bs*64*2048
        # NOTE(review): MLP input dim is hard-coded to 2048 while fc uses
        # opts.img_dim — confirm opts.img_dim == 2048 in all configs.
        self.mlp = MLP(2048, opts.embed_size // 2, opts.embed_size, 2)
        self.gpool = GPO(32, 32)
        self.newmodule = myModule(dim=1024)

        self.fc = nn.Linear(opts.img_dim, opts.embed_size)

        if weight == 'xavier':
            print("img:xavier")
            nn.init.xavier_normal_(self.fc.weight)  # weight initialization scheme
        elif weight == 'kaiming':
            print("img:kaiming")
            nn.init.kaiming_normal_(self.fc.weight)

    def forward(self, x, lengths):
        """Encode region features into pooled joint-space embeddings.

        Args:
            x: region features — presumably (batch, n_regions, img_dim); TODO confirm.
            lengths: valid region count per sample, consumed by the pooling module.
        Returns:
            Pooled embeddings, L2-normalized when opts.imgnorm is set.
        """
        # linear projection plus an MLP residual branch
        enc = self.fc(x)
        enc = self.mlp(x) + enc
        # learned pooling over the region dimension
        features = self.newmodule(enc, lengths)
        if self.imgnorm:
            features = l2norm(features, dim=-1)
        return features

    def freeze_backbone(self):
        """Freeze every backbone parameter; safe no-op when no backbone exists."""
        # FIX: previously raised AttributeError because self.resnet_enc is never
        # created (its construction is commented out in __init__).
        backbone = getattr(self, 'resnet_enc', None)
        if backbone is None:
            LoggerSingleton().info('No backbone present; nothing to freeze.')
            return
        for param in backbone.parameters():
            param.requires_grad = False
        LoggerSingleton().info('Backbone frozen.')

    def unfreeze_backbone(self, fixed_blocks):
        """Unfreeze the backbone, keeping `fixed_blocks` base blocks frozen;
        safe no-op when no backbone exists."""
        # FIX: same AttributeError guard as freeze_backbone.
        backbone = getattr(self, 'resnet_enc', None)
        if backbone is None:
            LoggerSingleton().info('No backbone present; nothing to unfreeze.')
            return
        for param in backbone.parameters():  # open up all params first, then adjust the base parameters
            param.requires_grad = True
        backbone.set_fixed_blocks(fixed_blocks)
        backbone.unfreeze_base()
        LoggerSingleton().info(f'Backbone unfreeze, fixed blocks {backbone.get_fixed_blocks()}')


class TextEncoder(nn.Module):
    """GRU-based caption encoder: embedding -> (bi)GRU -> learned pooling -> optional L2 norm."""

    def __init__(self, opts, weight='xavier', logger=None, writer=None):
        """
        Args:
            opts: options namespace; uses opts.txtnorm, opts.bi_gru, opts.vocab_size,
                  opts.word_dim, opts.embed_size, opts.num_layers.
            weight: 'xavier' or 'kaiming' initialization for the embedding weight.
            logger: optional logger instance.
            writer: optional tensorboard writer.
        """
        super().__init__()
        self.l = logger  # log
        self.w = writer  # tensorboard
        self.txtnorm = opts.txtnorm
        self.use_bi_gru = opts.bi_gru
        # layers
        self.embed = nn.Embedding(opts.vocab_size, opts.word_dim)
        self.cap_rnn = nn.GRU(opts.word_dim, opts.embed_size, opts.num_layers, batch_first=True,
                              bidirectional=opts.bi_gru)
        self.dropout = nn.Dropout(0.4)
        self.gpool = GPO(32, 32)
        self.newmodule = myModule(dim=1024)
        if weight == 'xavier':
            print("Txt:xavier")
            nn.init.xavier_normal_(self.embed.weight)
        elif weight == 'kaiming':
            print("Txt:kaiming")
            nn.init.kaiming_normal_(self.embed.weight)

    def forward(self, captions, lengths):
        """Handles variable size captions.

        Args:
            captions: padded word-id tensor, (batch, max_len).
            lengths: per-caption lengths (CPU tensor accepted by pack_padded_sequence).
        Returns:
            Pooled caption embeddings, L2-normalized when opts.txtnorm is set.
        """
        # embed word ids to vectors
        cap_emb = self.embed(captions)
        self.cap_rnn.flatten_parameters()
        # pack, run the RNN, then unpack back to (batch, max_len, hidden)
        packed = pack_padded_sequence(cap_emb, lengths, batch_first=True, enforce_sorted=False)
        out, _ = self.cap_rnn(packed)
        cap_emb2, _ = pad_packed_sequence(out, batch_first=True)

        # FIX: fuse forward/backward directions by averaging the two halves only
        # for a bidirectional GRU; previously this was applied unconditionally,
        # which is wrong (and dimension-breaking) when opts.bi_gru is False.
        if self.use_bi_gru:
            half = cap_emb2.size(2) // 2
            cap_emb2 = (cap_emb2[:, :, :half] + cap_emb2[:, :, half:]) / 2

        # FIX: move lengths to the feature device instead of hard-coded .cuda(),
        # so the encoder also runs on CPU-only machines (identical on GPU).
        pooled_features = self.newmodule(cap_emb2, lengths.to(cap_emb2.device))
        if self.txtnorm:
            pooled_features = l2norm(pooled_features, dim=-1)
        return pooled_features


class EncoderText(nn.Module):
    """BERT-based caption encoder: BERT -> linear projection -> GPO pooling -> optional L2 norm."""

    def __init__(self, opts, weight='xavier', logger=None, writer=None):
        super().__init__()
        self.l = logger
        self.w = writer
        self.embed_size = opts.embed_size
        self.txt_norm = opts.txtnorm

        # pretrained BERT backbone plus a projection into the joint space
        self.bert = BertModel.from_pretrained(bert_model_path, config=model_config)
        self.linear = nn.Linear(768, self.embed_size)
        self.gpool = GPO(32, 32)

        nn.init.xavier_normal_(self.linear.weight)

    def forward(self, x, lengths):
        """Handles variable size captions.

        Args:
            x: padded token-id tensor, (batch, max_len); zeros are treated as padding.
            lengths: unused — pooling runs over the full padded length.
        """
        # non-zero token ids form the attention mask
        attn_mask = (x != 0).float()
        hidden = self.bert(input_ids=x, attention_mask=attn_mask).last_hidden_state  # B x N x D
        # NOTE(review): every caption is pooled over the full padded length
        # (`lengths` is ignored here) — confirm this is intended.
        cap_len = hidden.new_full((hidden.size(0),), hidden.size(1))

        projected = self.linear(hidden)
        pooled_features, _ = self.gpool(projected, cap_len)
        # normalization in the joint embedding space
        if self.txt_norm:
            pooled_features = l2norm(pooled_features, dim=-1)
        return pooled_features


if __name__ == '__main__':
    # Ad-hoc smoke-test scaffolding (disabled); this module is meant to be imported.
    # bert = BertModel.from_pretrained(bert_model_path, config=model_config)

    # m = ImageEncoder(2048, 1024, opts.getparser())
    # x = torch.randn((32, 3, 256, 256))
    # y = m(x)
    # print(y.size())
    # print(m)
    # print(tokenizer)
    pass
