import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm

from model.config_layer import config_lexicon


class GazViewFeature(nn.Module):
    """Lexicon-enhanced token features.

    Concatenates word embeddings (plus optional biword embeddings) with
    pooled gazetteer-match embeddings. For each token, lexicon matches are
    bucketed into 4 positional groups (the ``4`` throughout — presumably
    B/M/E/S; TODO confirm against the data-preparation layer), and every
    group is pooled into one ``gaz_emb_dim`` vector, yielding a
    ``(batch, seq_len, hidden_dim)`` feature tensor with
    ``hidden_dim = word_emb_dim [+ biword_emb_dim] + 4 * gaz_emb_dim``.
    """

    def __init__(self, data):
        """
        Args:
            data: corpus object providing the alphabets
                (``word_alphabet`` / ``biword_alphabet`` / ``gaz_alphabet``,
                each with a ``size()`` method) and optional pretrained
                embedding matrices (``pretrain_word_embedding`` /
                ``pretrain_biword_embedding`` / ``pretrain_gaz_embedding``,
                numpy arrays or ``None``).
        """
        super(GazViewFeature, self).__init__()

        self.use_biword = config_lexicon["use_biword"]
        self.use_char = config_lexicon["use_char"]
        self.use_count = config_lexicon["use_count"]

        self.word_emb_dim = config_lexicon["word_emb_dim"]
        self.biword_emb_dim = config_lexicon["biword_emb_dim"]

        self.gaz_emb_dim = config_lexicon["gaz_embed_dim"]
        self.pooler_dim = config_lexicon["pooler_dim"]

        # Word features + 4 pooled gaz-group features; biword dim is added
        # below when use_biword is on.
        self.hidden_dim = self.word_emb_dim + 4 * self.gaz_emb_dim

        self.fix_embedding = config_lexicon["fix_embedding"]

        self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.word_emb_dim)
        self.biword_embedding = nn.Embedding(
            data.biword_alphabet.size(), self.biword_emb_dim
        )
        self.gaz_embedding = nn.Embedding(data.gaz_alphabet.size(), self.gaz_emb_dim)

        # BUGFIX: pretrained word embeddings used to be loaded only when
        # ``use_char`` was on, although forward() looks up
        # ``self.word_embedding`` for the word inputs unconditionally.
        # Load them whenever they are available, mirroring the gaz branch
        # below.  (When use_char is on, the same table also serves as the
        # character embedding for lexicon matches.)
        if data.pretrain_word_embedding is not None:
            self.word_embedding.weight.data.copy_(
                torch.from_numpy(data.pretrain_word_embedding)
            )
            if self.fix_embedding:
                self.word_embedding.weight.requires_grad = False
        else:
            self.word_embedding.weight.data.copy_(
                torch.normal(
                    mean=0,
                    std=1,
                    size=(data.word_alphabet.size(), self.word_emb_dim),
                )
            )

        if self.use_biword:
            self.hidden_dim += self.biword_emb_dim

            if data.pretrain_biword_embedding is not None:
                self.biword_embedding.weight.data.copy_(
                    torch.from_numpy(data.pretrain_biword_embedding)
                )
                if self.fix_embedding:
                    self.biword_embedding.weight.requires_grad = False
            else:
                self.biword_embedding.weight.data.copy_(
                    torch.normal(
                        mean=0,
                        std=1,
                        size=(data.biword_alphabet.size(), self.biword_emb_dim),
                    )
                )

        if data.pretrain_gaz_embedding is not None:
            self.gaz_embedding.weight.data.copy_(
                torch.from_numpy(data.pretrain_gaz_embedding)
            )
            if self.fix_embedding:
                self.gaz_embedding.weight.requires_grad = False
        else:
            self.gaz_embedding.weight.data.copy_(
                torch.normal(
                    mean=0,
                    std=1,
                    size=(data.gaz_alphabet.size(), self.gaz_emb_dim),
                )
            )

        # self.pooler = nn.Sequential(
        #     nn.Dropout(), nn.Linear(self.hidden_dim, self.pooler_dim)
        # )

    def forward(
        self,
        word_inputs,
        biword_inputs,
        layer_gaz,
        gaz_count,
        gaz_chars,
        gaz_mask_input,
        gazchar_mask_input,
    ):
        """Build the concatenated word + pooled-lexicon feature tensor.

        Expected shapes (per the original inline comments — confirm against
        the caller): ``word_inputs``/``biword_inputs`` are ``(b, l)`` id
        tensors; ``layer_gaz``/``gaz_count``/``gaz_mask_input`` are
        ``(b, l, 4, g)``; ``gaz_chars``/``gazchar_mask_input`` are
        ``(b, l, 4, gl, cl)``.  Mask tensors are nonzero at PAD positions.

        Returns:
            Tensor of shape ``(b, l, self.hidden_dim)``.
        """
        batch_size = word_inputs.size()[0]
        seq_len = word_inputs.size()[1]
        word_embs = self.word_embedding(word_inputs)  # (b, l, we)

        if self.use_biword:
            biword_embs = self.biword_embedding(biword_inputs)
            word_embs = torch.cat([word_embs, biword_embs], dim=-1)

        if self.use_char:
            # Character view: mean of the char (=word-table) embeddings of
            # every lexicon match.
            gazchar_embeds = self.word_embedding(gaz_chars)  # (b,l,4,gl,cl,ce)

            # Out-of-place masked_fill with a broadcast mask: the old
            # ``.data.masked_fill_`` bypassed autograd, and the explicit
            # ``.repeat`` materialized a needlessly large mask tensor.
            gazchar_embeds = gazchar_embeds.masked_fill(
                gazchar_mask_input.unsqueeze(-1).bool(), 0
            )  # (b,l,4,gl,cl,ce)

            # Real (unmasked) char count per match; bump zero counts to 1
            # so all-padding matches divide to 0 instead of NaN.
            gaz_charnum = (
                (gazchar_mask_input == 0).sum(dim=-1, keepdim=True).float()
            )  # (b,l,4,gl,1)
            gaz_charnum = gaz_charnum + (gaz_charnum == 0).float()
            gaz_embeds = gazchar_embeds.sum(-2) / gaz_charnum  # (b,l,4,gl,ce)

        else:  # lexicon-embedding view
            gaz_embeds = self.gaz_embedding(layer_gaz)  # (b,l,4,g,ge)
            # Zero out PAD matches (broadcast mask, autograd-visible).
            gaz_embeds = gaz_embeds.masked_fill(
                gaz_mask_input.unsqueeze(-1).bool(), 0
            )  # (b,l,4,g,ge)

        if self.use_count:
            # Frequency-weighted pooling: weight each match by its corpus
            # count relative to the token's total count over all 4 groups.
            count_sum = torch.sum(gaz_count, dim=3, keepdim=True)  # (b,l,4,1)
            count_sum = torch.sum(count_sum, dim=2, keepdim=True)  # (b,l,1,1)
            # Guard tokens with no matches at all against division by zero,
            # mirroring the gaz_charnum guard above.
            count_sum = count_sum + (count_sum == 0).type_as(count_sum)

            weights = gaz_count.div(count_sum)  # (b,l,4,g)
            weights = weights * 4  # rescale so the 4 groups average to 1
            weights = weights.unsqueeze(-1)
            gaz_embeds = weights * gaz_embeds  # (b,l,4,g,ge)
            gaz_embeds = torch.sum(gaz_embeds, dim=3)  # (b,l,4,ge)

        else:
            # Plain mean over matches; nan_to_num cleans up groups with
            # zero matches (0 / 0).
            gaz_num = (
                (gaz_mask_input == 0).sum(dim=-1, keepdim=True).float()
            )  # (b,l,4,1)
            gaz_embeds = gaz_embeds.sum(-2) / gaz_num  # (b,l,4,ge)/(b,l,4,1)
            gaz_embeds = torch.nan_to_num(gaz_embeds)

        gaz_embeds = gaz_embeds.view(batch_size, seq_len, -1)  # (b,l,4*ge)

        # return self.pooler(feature)  # [b, l, pooler_dim] (pooler disabled)
        return torch.cat([word_embs, gaz_embeds], dim=-1)  # (b,l,hidden_dim)
