import torch
import torch.nn as nn
import torch.nn.functional as F
import os


class JointModel(nn.Module):
    """Joint knowledge-ranking + response-generation model.

    Wraps a ranking model that scores candidate knowledge passages for a
    query, and a generation model that produces a response conditioned on
    the top-ranked knowledge concatenated with the dialogue context.
    """

    def __init__(self, rank_model, gen_model, pad_token_id=1):
        super().__init__()
        self.rank_model = rank_model
        self.gen_model = gen_model
        # Token id treated as padding when building attention masks.
        # Default 1 preserves the previously hard-coded value (mBART-style
        # pad id) — TODO confirm against the tokenizer actually used.
        self.pad_token_id = pad_token_id

    def rank(self, rank_inputs, rank_masks, size, k=3):
        """Score knowledge candidates and return the top-k indices per example.

        Args:
            rank_inputs: [batch_size * pool_size, query_len + knowledge_len]
            rank_masks:  [batch_size * pool_size, query_len + knowledge_len]
            size: tuple (batch_size, pool_size).
            k: number of candidates to keep per example.

        Returns:
            dict with:
                logits: [batch_size, pool_size] raw candidate scores.
                rank:   [batch_size, k] indices of the top-k candidates.
        """
        b_s, p_s = size
        output = self.rank_model(rank_inputs, rank_masks)
        # Fold the flattened candidate dimension back into the batch:
        # [b_s * p_s] -> [b_s, p_s]
        logits = output["logits"].reshape(b_s, -1)
        rank = torch.topk(logits, k=k, dim=-1).indices  # [b_s, k]
        return dict(
            logits=logits,
            rank=rank,  # [b_s, k]
        )

    def generate(self, contexts, knowledges, rank_index, lang_code):
        """Generate responses conditioned on the top-ranked knowledge.

        Args:
            contexts:   [batch_size, context_len] context token ids.
            knowledges: [batch_size, pool_size, knowledge_len] candidate
                knowledge token ids.
            rank_index: [batch_size, k] indices into the knowledge pool.
            lang_code: decoder start token id (target-language code).

        Returns:
            dict with predict: [batch_size, max_len] generated token ids.
        """
        b_s, p_s, k_l = knowledges.size()
        # Select the k ranked knowledge passages per example: [b_s, k, k_l]
        gen_knowledges = torch.gather(
            knowledges, dim=1, index=rank_index.unsqueeze(-1).expand(-1, -1, k_l)
        )
        # Prepend the flattened knowledge to the context: [b_s, k*k_l + c_l]
        concat = torch.cat([gen_knowledges.reshape(b_s, -1), contexts], dim=-1)

        # Fix: pass the padding mask explicitly (decode() already builds the
        # identical mask); otherwise padded positions attend as real tokens.
        predict = self.gen_model.generate(
            input_ids=concat,
            attention_mask=concat.ne(self.pad_token_id).long(),
            decoder_start_token_id=lang_code,
            num_beams=3,
            max_length=128,
        )

        return dict(
            predict=predict  # [batch_size, max_len]
        )

    def decode(self, context, knowledges, rank_index, response):
        """Teacher-forced forward pass, plus per-knowledge attention scores.

        Args:
            context:    [batch_size, context_len] context token ids.
            knowledges: [batch_size, pool_size, knowledge_len].
            rank_index: [batch_size, k] indices into the knowledge pool.
            response:   [batch_size, target_len] gold response token ids.

        Returns:
            dict with:
                logits: generation logits from the decoder.
                mean_attentions: [batch_size, k] softmax over the attention
                    mass each knowledge passage receives, averaged over
                    non-pad target tokens.
                max_attentions: [batch_size, k] softmax over the attention
                    mass, taking the max over target tokens instead.
        """
        b_s, p_s, k_l = knowledges.size()
        _, con_num = rank_index.size()
        # Select the k ranked knowledge passages per example: [b_s, k, k_l]
        gen_knowledges = torch.gather(
            knowledges, dim=1, index=rank_index.unsqueeze(-1).expand(-1, -1, k_l)
        )
        # Prepend the flattened knowledge to the context: [b_s, k*k_l + c_l]
        concat = torch.cat([gen_knowledges.reshape(b_s, -1), context], dim=-1)

        output = self.gen_model(
            input_ids=concat,
            attention_mask=concat.ne(self.pad_token_id).long(),
            decoder_input_ids=response,
            decoder_attention_mask=response.ne(self.pad_token_id).long(),
            output_attentions=True,
        )

        mean_attentions = []
        max_attentions = []
        # output.cross_attentions: num_layers tensors of
        # [batch_size, num_heads, target_len, source_len]. The first mean
        # averages over layers, the second over heads:
        # -> [batch_size, target_len, source_len]
        pre_attentions = torch.stack(output.cross_attentions, dim=0).mean(0).mean(1)
        # Exclude pad positions in the target from the statistics.
        mask = response.ne(self.pad_token_id).type_as(pre_attentions)  # [batch_size, target_len]
        for i in range(con_num):
            # Attention mass each target token puts on knowledge passage i
            # (sum over that passage's source slice), pad tokens zeroed.
            per_token = pre_attentions[:, :, i * k_l: (i + 1) * k_l].sum(-1) * mask
            # mean_attentions: average over the non-pad target tokens.
            mean_attentions.append(per_token.sum(-1) / mask.sum(-1))
            # max_attentions: score of the single most attentive target token.
            max_attentions.append(per_token.max(-1).values)

        return dict(
            logits=output.logits,  # [batch_size, max_len]
            mean_attentions=torch.softmax(torch.stack(mean_attentions, dim=-1), dim=-1),  # [batch_size, k]
            max_attentions=torch.softmax(torch.stack(max_attentions, dim=-1), dim=-1),  # [batch_size, k]
        )

    def load_rank(self, rankpath):
        """Load the warmed-up ranking model checkpoint, if present."""
        if not os.path.exists(rankpath):
            print(f"not exists rank model in {rankpath}")
            return
        print(f'Load rank model in {rankpath}')
        # map_location="cpu" so a GPU-saved checkpoint also loads on
        # CPU-only hosts; load_state_dict copies the tensors onto the
        # model's current device regardless.
        self.rank_model.load_state_dict(torch.load(rankpath, map_location="cpu"))

    def load_gen(self, genpath):
        """Load the warmed-up generation model checkpoint, if present."""
        if not os.path.exists(genpath):
            print(f"not exists generate model in {genpath}")
            return
        print(f'Load generate model in {genpath}')
        # See load_rank: keep loading robust on CPU-only hosts.
        self.gen_model.load_state_dict(torch.load(genpath, map_location="cpu"))
