import os
import re
import time
import random
import shutil
import logging
import argparse

import numpy as np

import json

import torch
from torch import optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

from transformers import BertTokenizer, BertModel, BertConfig

class FFNN(torch.nn.Module):
    """Feed-forward network: hidden (Linear + ReLU [+ Dropout]) layers
    followed by a final Linear projection to ``output_size``.
    """

    def __init__(self, input_size, num_hidden_layers, 
            hidden_size, output_size, dropout):
        """
            :param input_size: input size
            :param num_hidden_layers: count of hidden layers. 0 for MLP.
            :param hidden_size: width of each hidden layer (ignored when
                num_hidden_layers == 0)
            :param output_size: size of the final linear projection
            :param dropout: dropout probability, or None to disable dropout
        """
        super().__init__()
        layers = []
        in_features = input_size
        for _ in range(num_hidden_layers):
            layers.append(torch.nn.Linear(in_features, hidden_size, bias=True))
            layers.append(torch.nn.ReLU())
            if dropout is not None:
                layers.append(torch.nn.Dropout(dropout))
            in_features = hidden_size
        # Final projection (this is the whole network when depth is 0).
        layers.append(torch.nn.Linear(in_features, output_size, bias=True))
        self.ffnn = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Map ``x`` of shape [..., input_size] to [..., output_size]."""
        return self.ffnn(x)
    
class Projection(FFNN):
    """A single linear projection: an FFNN with zero hidden layers."""

    def __init__(self, input_size, output_size):
        """
            :param input_size: input feature size
            :param output_size: output feature size
        """
        # With num_hidden_layers=0 the FFNN is just one Linear layer and
        # hidden_size is never read, so 0 is only a placeholder (the previous
        # -1 sentinel worked for the same reason).
        super().__init__(
            input_size=input_size,
            num_hidden_layers=0,
            hidden_size=0,
            output_size=output_size,
            dropout=None
        )

class MentionScorerLayer(torch.nn.Module):
    """Scores candidate span embeddings with a feed-forward network."""

    def __init__(self, emb, args):
        """
            :param emb: span embedding size
            :param args: namespace providing ffnn_depth, ffnn_size, dropout
        """
        super().__init__()
        self.ffnn = FFNN(
            input_size=emb,
            num_hidden_layers=args.ffnn_depth,
            hidden_size=args.ffnn_size,
            output_size=1,
            dropout=args.dropout)

    def forward(self, span_emb):
        """Map span embeddings [..., emb] to mention scores [..., 1]."""
        # TODO: implement width feature with nn.Embedding or something
        return self.ffnn(span_emb)

class SlowScorerLayer(torch.nn.Module):
    """Scores (span, antecedent) pairs with an FFNN over pair embeddings."""

    def __init__(self, emb, args):
        """
            :param emb: pair embedding size (FFNN input size)
            :param args: namespace providing ffnn_depth, ffnn_size, dropout
        """
        super().__init__()
        self.ffnn = FFNN(
            input_size=emb,
            num_hidden_layers=args.ffnn_depth,
            hidden_size=args.ffnn_size,
            output_size=1,
            dropout=args.dropout)

    def forward(self, top_span_emb, top_antecedents, top_antecedent_emb):
        """
        top_span_emb: [b, k, emb]
        top_antecedents: [b, k, c]
        top_antecedent_emb: [b, k, c, emb]
        returns slow antecedent scores of shape [b, k, c]
        """
        # TODO accept feature embedding

        # [b, k, 1, emb]: one copy of each span, to pair with its antecedents.
        expanded_span_emb = torch.unsqueeze(top_span_emb, -2)
        # [b, k, c, emb]: element-wise product feature, broadcast over c.
        similarity_emb = expanded_span_emb * top_antecedent_emb
        # [b, k, c, emb]: materialize the broadcast along the antecedent axis.
        expanded_span_emb = expanded_span_emb.expand_as(top_antecedent_emb)
        # [b, k, c, 3*emb]: concatenated pair representation.
        pair_emb = torch.cat(
            [expanded_span_emb, top_antecedent_emb, similarity_emb], -1)
        # [b, k, c]
        return self.ffnn(pair_emb).squeeze(-1)

class FastScorerLayer(torch.nn.Module):
    """Cheap pairwise scorer: project the source spans, apply dropout to both
    sides, then take batched dot products between every span pair."""

    def __init__(self, emb, args):
        """
            :param emb: span embedding size
            :param args: namespace providing dropout
        """
        super().__init__()
        self.dropout = args.dropout
        self.source_dropout = torch.nn.Dropout(self.dropout)
        self.sim_ffnn = Projection(emb, emb)
        self.target_dropout = torch.nn.Dropout(self.dropout)

    def forward(self, source_top_span_emb, target_top_span_emb): 
        """
        source/target_top_span_emb: actually the same when applied, of shape [batch_size, k, emb]
        returns: [batch_size, k, k] score for every (source, target) pair
        """
        # [batch_size, k, emb]
        projected_source = self.source_dropout(self.sim_ffnn(source_top_span_emb))
        # [batch_size, k, emb]
        dropped_target = self.target_dropout(target_top_span_emb)
        # [batch_size, k, k]: batched matrix product = all pairwise dot products.
        return projected_source @ dropped_target.transpose(1, 2)

def load_model(args):
    """Load the BERT encoder.

    Prefers the locally saved checkpoint at ``args.model`` (no hub access);
    otherwise falls back to the pretrained ``args.bert_model``.

        :param args: namespace providing ``model`` and ``bert_model``
        :return: a ``BertModel`` instance
    """
    if args.model is not None:  # PEP 8: compare to None with `is not`, not `!=`
        # local_files_only avoids any network round-trip to the HF hub.
        return BertModel.from_pretrained(args.model, local_files_only=True)
    return BertModel.from_pretrained(args.bert_model)

class RelationModel(torch.nn.Module):
    """Mention detection + coarse antecedent ranking over a BERT encoder.

    Pipeline (see ``forward``): encode tokens with BERT, build span embeddings
    from start/end token embeddings, score each candidate mention, then do
    coarse-to-fine pruning of antecedents with the cheap bilinear scorer.
    """

    def __init__(self, args):
        """
            :param args: namespace providing dropout, max_num_candidates,
                max_top_antecedents, hidden_size, ffnn_depth, ffnn_size,
                model, bert_model and output_dir
        """
        super().__init__()
        self.dropout = args.dropout
        self.max_num_candidates = args.max_num_candidates
        self.max_top_antecedents = args.max_top_antecedents
        self.encoder = load_model(args)
        # Span embedding = concatenated [start, end] token embeddings.
        mention_emb_size = 2 * args.hidden_size # BERT hidden size
        self.mention_scorer = MentionScorerLayer(mention_emb_size, args)
        self.fast_scorer = FastScorerLayer(mention_emb_size, args)
        # Pair embedding = [target, antecedent, target*antecedent].
        mention_pair_emb_size = 3 * mention_emb_size # + feature_size but this is 0
        self.slow_scorer = SlowScorerLayer(mention_pair_emb_size, args)

        self.output_root = args.output_dir
        # NOTE: a previous version also pre-created an output_dir/pytorch-bert
        # directory here, but that code sat unreachable after a bare `return`
        # and has been removed; save_pretrained() builds its own path.

    def get_full_save_path(self, subdir, mode=None, forced_write=False):
        """Resolve (and validate) ``output_root/subdir`` for reading/writing.

            :param subdir: sub-directory under the output root
            :param mode: "w" creates the directory, refusing to overwrite a
                non-empty one unless forced_write; "r" checks it exists;
                any other value only joins the path without checks
            :param forced_write: with mode "w", wipe a non-empty directory
            :return: the joined path
            :raises ValueError: non-empty path ("w"), missing path ("r"),
                or a path that exists but is not a directory
        """
        save_path = os.path.join(self.output_root, subdir)
        if mode == "w":
            if os.path.isdir(save_path) and len(os.listdir(save_path)) != 0:
                if forced_write:
                    shutil.rmtree(save_path)
                else:
                    raise ValueError(f"Warning: save path {save_path} is not empty. Exit.")
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            if not os.path.isdir(save_path):
                raise ValueError(f"Invalid save path {save_path}. Exit.")
        elif mode == "r":
            if os.path.isdir(save_path) and len(os.listdir(save_path)) == 0:
                print(f"Warning: reading from empty path {save_path}.")
            if not os.path.exists(save_path):
                raise ValueError(f"save path {save_path} is empty. Exit.")
            if not os.path.isdir(save_path):
                raise ValueError(f"Invalid save path {save_path}. Exit.")
        return save_path

    def save_model(self, subdir, forced_write=False):
        """Save the full state dict to ``output_root/subdir/weights.pt``."""
        full_dir = self.get_full_save_path(subdir, "w", forced_write)
        print(f"saving models to {full_dir}.")
        full_path = os.path.join(full_dir, "weights.pt")
        torch.save(self.state_dict(), full_path)
        
    def load_model(self, subdir):
        """Load weights saved by ``save_model`` from ``output_root/subdir``."""
        full_dir = self.get_full_save_path(subdir, "r")
        full_path = os.path.join(full_dir, "weights.pt")
        # map_location keeps tensors on CPU regardless of where they were saved.
        state_dict = torch.load(full_path, map_location=lambda storage, loc: storage)
        self.load_state_dict(state_dict)
        del state_dict
        torch.cuda.empty_cache()

    def save_pretrained(self, subdir, forced_write=False):
        """Save only the BERT encoder in HF format under subdir/pytorch-bert."""
        full_dir = self.get_full_save_path(subdir, "w", forced_write)
        full_dir = os.path.join(full_dir, "pytorch-bert")
        self.encoder.save_pretrained(full_dir)
        print("Bert weight saved.")

    def get_parameters(self, args):
        """
        customize hyper-parameters for parameters of layers
        """
        # Currently a single group: every parameter trains at the BERT lr.
        return [
            {'params': self.parameters(), 'lr': args.bert_learning_rate },
        ]

    def extra_repr(self):
        return "Hi! I'm extra representation."

    def get_span_emb(self, context_outputs, span_starts, span_ends):
        """Build span embeddings from the start & end token embeddings.

        context_outputs - [batch_size, num_words, hidden_size]
        span_starts - [batch_size, num_candidates]
        span_ends - [batch_size, num_candidates]
        returns span_emb - [batch_size, num_candidates, hidden_size*2]
        """
        span_emb_list = []
        # expanded_span_starts/ends - [batch_size, num_candidates, hidden_size]
        span_indices_shape = [-1, -1, context_outputs.shape[2]] 
        expanded_span_starts = span_starts.unsqueeze(2).expand(span_indices_shape)
        expanded_span_ends = span_ends.unsqueeze(2).expand(span_indices_shape)
        # span_start/end_emb - [batch_size, num_candidates, hidden_size]
        # Collect embeddings of start & end words of spans
        span_start_emb = torch.gather(context_outputs, 1, expanded_span_starts)
        span_end_emb = torch.gather(context_outputs, 1, expanded_span_ends)
        span_emb_list.append(span_start_emb)
        span_emb_list.append(span_end_emb)

        # TODO implement embeddings for other features

        # span_emb - [batch_size, num_candidates, hidden_size*2]
        # Form span embeddings with embeddings of its start & end words
        span_emb = torch.cat(span_emb_list, 2)
        return span_emb
        
    def get_mention_scores(self, span_emb, span_starts, span_ends):
        """Score spans: [b, num_candidates, emb] -> [b, num_candidates, 1].
        span_starts/ends are currently unused (kept for future width features)."""
        return self.mention_scorer(span_emb)
    
    def get_slow_antecedent_scores(self, top_span_emb, 
            top_antecedents, top_antecedent_emb, feature_emb):
        """Pairwise FFNN scores; feature_emb is currently unused."""
        return self.slow_scorer(top_span_emb, top_antecedents, top_antecedent_emb)
    
    def get_fast_antecedent_scores(self, top_span_emb):       
        """Cheap all-pairs scores of spans against themselves: [b, k, k]."""
        return self.fast_scorer(top_span_emb, top_span_emb)

    def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, span_mask, c):
        """
        top_span_emb - [batch_size, num_candidates, emb]
        top_span_mention_scores - [batch_size, num_candidates]
        span_mask - [batch_size, num_candidates]
        c - scalar, the beam size (k=c in top_k)
        """
        # antecedents_mask - [batch_size, num_candidates, num_candidates]
        # After broadcasting, a mask like [1, .n., 1, 0, .m., 0] will look like:
        # [[ n*n 1 ] [ n*m 0 ]
        #  [ m*n 0 ] [ m*m 0 ]]
        # Where n is count of valid elements(1), and m is count of invalid elements.
        antecedents_mask = span_mask.unsqueeze(2) * span_mask.unsqueeze(1)
        antecedents_mask = antecedents_mask > 0

        # fast_antecedent_scores - [batch_size, num_candidates, num_candidates]
        # log(0) = -inf masks out invalid pairs; log(1) = 0 leaves valid ones.
        fast_antecedent_scores = torch.log(antecedents_mask.float())
        fast_antecedent_scores += top_span_mention_scores.unsqueeze(2) + \
                top_span_mention_scores.unsqueeze(1)
        fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb)

        # TODO: c > num_candidates?
        # top_antecedents - [batch_size, num_candidates, c]
        # It's possible that invalid spans (though scored -inf) will get into topk...
        _, top_antecedents = torch.topk(fast_antecedent_scores, c, sorted=False)
        # top_antecedents_mask - [batch_size, num_candidates, c]
        # ...So a mask is needed
        top_antecedents_mask = torch.gather(antecedents_mask, 2, top_antecedents)
        # top_fast_antecedent_scores - [batch_size, num_candidates, c]
        # and we gather the scores
        top_fast_antecedent_scores = torch.gather(fast_antecedent_scores, 2, top_antecedents)
        return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores

    def forward(self, input_ids, 
            input_mask, 
            candidate_starts, 
            candidate_ends, 
            candidate_mask):
        """Encode, score mentions, and rank antecedents.

        input_ids - [batch_size, num_words] (num takes max for batched data)
        input_mask - [batch_size, num_words]
        candidate_starts - [batch_size, num_candidates]
        candidate_ends - [batch_size, num_candidates]
        candidate_mask - [batch_size, num_candidates]
        Invalid parts (where mask value is 0) are all supposed to be 0.
        """
        # span_doc - [batch_size, num_words, hidden_size]
        # Encode all tokens into embeddings of length [hidden_size]
        span_doc = self.encoder(
            input_ids=input_ids,
            attention_mask=input_mask
        ).last_hidden_state
        num_words = torch.sum(input_mask, dim=1)
        # candidate_span_emb - [batch_size, num_candidates, emb]
        # Form span embeddings from token embeddings. 
        # Validity can still be determined by candidate_mask.
        candidate_span_emb = self.get_span_emb(span_doc, 
            candidate_starts, candidate_ends)
        # candidate_mention_scores - [batch_size, num_candidates, 1]
        # Calculate mention score for every span. Validity det'd by candidate_mask.
        candidate_mention_scores = self.get_mention_scores(candidate_span_emb, 
            candidate_starts, candidate_ends)
        # candidate_mention_scores - [batch_size, num_candidates]
        # Squeezed mention scores. Validity det'd by candidate_mask.
        candidate_mention_scores = torch.squeeze(candidate_mention_scores, 2)
        
        # TODO: limit on max top antecedents?
        # k = torch.sum(candidate_mask, dim=1)
        beam_size = min(self.max_top_antecedents, self.max_num_candidates)

        # just for consistency. Validity det'd by candidate_mask
        # TODO maybe tops should be sorted by mention score
        top_span_starts = candidate_starts
        top_span_ends = candidate_ends
        top_span_emb = candidate_span_emb
        top_span_mention_scores = candidate_mention_scores

        # dummy_scores - [batch_size, num_candidates, 1]
        # Zero score for the "no antecedent" (dummy/epsilon) choice.
        dummy_scores = torch.zeros_like(top_span_mention_scores).unsqueeze(2)

        top_antecedents,  \
        top_antecedents_mask, \
        top_fast_antecedent_scores = self.coarse_to_fine_pruning(
            top_span_emb, top_span_mention_scores, candidate_mask, beam_size
        )

        # TODO: fine grained?
        # top_antecedent_scores - [batch_size, num_candidates, beam_size]
        top_antecedent_scores = top_fast_antecedent_scores
        # top_antecedent_scores - [batch_size, num_candidates, beam_size + 1]
        top_antecedent_scores = torch.cat([dummy_scores, top_antecedent_scores], 2)
        
        return [candidate_starts, candidate_ends, candidate_mask,
            top_antecedents, top_antecedents_mask, top_antecedent_scores]
