# Copyright (c) OpenMMLab. All rights reserved.
import math
from pytorch_transformers import BertTokenizer, BertModel, BertForMaskedLM, BertConfig
import torch.nn as nn
import torch
from mmocr.models.builder import ENCODERS
from .base_encoder import BaseEncoder

from mmcv.runner import ModuleList
from mmocr.models.builder import ENCODERS

@ENCODERS.register_module()
class BertTextEncoder(BaseEncoder):
    """Encode raw text strings into projected BERT embeddings.

    Each input string is word-piece tokenized, truncated/right-padded to
    ``max_len`` tokens, run through a frozen BERT model (``torch.no_grad``),
    and linearly projected to ``project_dim``.

    Args:
        json_path (str): Path to the BERT config JSON file.
        weight_path (str): Path to the pretrained BERT weights.
        voc_path (str): Path to the BERT vocabulary file.
        project_dim (int): Output dimension of the text projection layer.
        max_len (int): Maximum number of tokens kept per text.
        padding_idx (int): Unused; kept for config compatibility.
    """

    def __init__(self,
                 json_path='',
                 weight_path='',
                 voc_path='',
                 project_dim=512,
                 max_len=50,
                 padding_idx=0):
        super().__init__()

        model_config = BertConfig.from_pretrained(json_path)
        self.textExtractor = BertModel.from_pretrained(
            weight_path, config=model_config)
        self.tokenizer = BertTokenizer.from_pretrained(voc_path)
        self.max_len = max_len
        embedding_dim = self.textExtractor.config.hidden_size
        self.text_projection = nn.Linear(embedding_dim, project_dim)
        # NOTE(review): image_projection is not used in forward(); presumably
        # consumed by a sibling module elsewhere — kept for compatibility.
        self.image_projection = nn.Linear(project_dim, project_dim)

    def _prepare_inputs(self, texts):
        """Tokenize each text, truncate to ``max_len`` and right-pad with 0.

        Returns:
            tuple[Tensor, Tensor, Tensor]: ``(tokens, segments, input_masks)``,
            each of shape ``(batch, max_len)`` on CPU.
        """
        tokens, segments, input_masks = [], [], []
        for text in texts:
            # Split the sentence into word pieces, then map to vocab ids.
            tokenized = self.tokenizer.tokenize(text)
            token_ids = self.tokenizer.convert_tokens_to_ids(tokenized)
            token_ids = token_ids[:self.max_len]  # truncate over-long texts
            tokens.append(token_ids)
            segments.append([0] * len(token_ids))  # single-sentence: all 0
            input_masks.append([1] * len(token_ids))
        # Right-pad every sequence with zeros so all rows share max_len.
        for j in range(len(tokens)):
            padding = [0] * (self.max_len - len(tokens[j]))
            tokens[j] += padding
            segments[j] += padding
            input_masks[j] += padding
        return (torch.tensor(tokens), torch.tensor(segments),
                torch.tensor(input_masks))

    def forward(self, texts):
        """Encode a batch of strings.

        Args:
            texts (list[str]): Raw input texts.

        Returns:
            Tensor: Projected features of shape
            ``(batch, max_len, project_dim)``.
        """
        tokens, segments, input_masks = self._prepare_inputs(texts)
        # Run on whichever device the BERT weights live on. The previous
        # hard-coded `.cuda()` calls crashed on CPU-only hosts and assumed
        # the model was on the default CUDA device.
        device = next(self.textExtractor.parameters()).device
        tokens = tokens.to(device)
        segments = segments.to(device)
        input_masks = input_masks.to(device)
        # BERT acts as a frozen feature extractor; no gradients needed.
        with torch.no_grad():
            output = self.textExtractor(
                tokens, token_type_ids=segments, attention_mask=input_masks)
        text_embeddings = output[0]  # (batch, seq_len, hidden_size)

        return self.text_projection(text_embeddings)


@ENCODERS.register_module()
class BertTextEncoderV2(BaseEncoder):
    """Encode raw text strings into projected BERT embeddings plus a mask.

    Variant of ``BertTextEncoder`` that reserves one position of the
    sequence budget (effective length ``max_seq_len - 1``) and returns a
    dict carrying both the projected features and a padding mask.

    Args:
        json_path (str): Path to the BERT config JSON file.
        weight_path (str): Path to the pretrained BERT weights.
        voc_path (str): Path to the BERT vocabulary file.
        project_dim (int): Output dimension of the text projection layer.
        max_seq_len (int): Sequence budget; texts are truncated/padded to
            ``max_seq_len - 1`` tokens.
        padding_idx (int): Stored for downstream use.
    """

    def __init__(self,
                 json_path='',
                 weight_path='',
                 voc_path='',
                 project_dim=512,
                 max_seq_len=50,
                 padding_idx=0):
        super().__init__()

        model_config = BertConfig.from_pretrained(json_path)
        self.textExtractor = BertModel.from_pretrained(
            weight_path, config=model_config)
        self.tokenizer = BertTokenizer.from_pretrained(voc_path)
        # One slot of the budget is reserved; sequences use max_seq_len - 1.
        self.max_len = max_seq_len - 1
        embedding_dim = self.textExtractor.config.hidden_size
        self.text_projection = nn.Linear(embedding_dim, project_dim)
        self.padding_idx = padding_idx

    def _prepare_inputs(self, texts):
        """Tokenize each text, truncate to ``max_len`` and right-pad with 0.

        Returns:
            tuple[Tensor, Tensor, Tensor]: ``(tokens, segments, input_masks)``,
            each of shape ``(batch, max_len)`` on CPU.
        """
        tokens, segments, input_masks = [], [], []
        for text in texts:
            # Split the sentence into word pieces, then map to vocab ids.
            tokenized = self.tokenizer.tokenize(text)
            token_ids = self.tokenizer.convert_tokens_to_ids(tokenized)
            token_ids = token_ids[:self.max_len]  # truncate over-long texts
            tokens.append(token_ids)
            segments.append([0] * len(token_ids))  # single-sentence: all 0
            input_masks.append([1] * len(token_ids))
        # Right-pad every sequence with zeros so all rows share max_len.
        for j in range(len(tokens)):
            padding = [0] * (self.max_len - len(tokens[j]))
            tokens[j] += padding
            segments[j] += padding
            input_masks[j] += padding
        return (torch.tensor(tokens), torch.tensor(segments),
                torch.tensor(input_masks))

    def forward(self, texts):
        """Encode a batch of strings.

        Args:
            texts (list[str]): Raw input texts.

        Returns:
            dict: ``output`` — projected features of shape
            ``(batch, max_len, project_dim)``; ``pad_mask`` — the attention
            mask (1 for real tokens, 0 for padding) unsqueezed to
            ``(batch, 1, max_len)``.
        """
        tokens, segments, input_masks = self._prepare_inputs(texts)
        # Run on whichever device the BERT weights live on. The previous
        # hard-coded `.cuda()` calls crashed on CPU-only hosts and assumed
        # the model was on the default CUDA device.
        device = next(self.textExtractor.parameters()).device
        tokens = tokens.to(device)
        segments = segments.to(device)
        input_masks = input_masks.to(device)
        # BERT acts as a frozen feature extractor; no gradients needed.
        with torch.no_grad():
            output = self.textExtractor(
                tokens, token_type_ids=segments, attention_mask=input_masks)
        text_embeddings = output[0]  # (batch, seq_len, hidden_size)

        hash_features = self.text_projection(text_embeddings)
        return dict(output=hash_features, pad_mask=input_masks.unsqueeze(-2))