from fairseq.models import BaseFairseqModel
from fairseq.models import (
    register_model,
    register_model_architecture,
)
from .transformer_lm_bpe import BPETransformerLanguageModel
# from .transformer_lm_bpe_coherence_dis import BPECDTransformerLanguageModel
# from .transformer_lm_bpe_coherence_dis_enhance import BPECDETransformerLanguageModel
from .transformer_lm_bpe_coherence_dis_article import BPECDATransformerLanguageModel

@register_model('fairseqrl')
class FairseqRlModel(BaseFairseqModel):
    """Adversarial language model pairing a generator LM with a coherence
    discriminator.

    Args:
        generator (BPETransformerLanguageModel): the language-model generator
        discriminator (BPECDATransformerLanguageModel): the article-level
            coherence discriminator
    """

    def __init__(self, generator, discriminator):
        super().__init__()

        self.generator = generator
        self.discriminator = discriminator

    def update_generator(self, generator):
        # Swap in a replacement generator (e.g. between training phases).
        self.generator = generator

    def update_discriminator(self, discriminator):
        # Swap in a replacement discriminator.
        self.discriminator = discriminator

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        BPETransformerLanguageModel.add_args(parser)
        parser.add_argument('--pretrained-checkpoint-dis', metavar='DIR',
                            help='path to load checkpoint from pretrained model')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance (generator + discriminator) for ``task``."""
        generator = BPETransformerLanguageModel.build_model(args, task)
        discriminator = BPECDATransformerLanguageModel.build_model(args, task)
        # Use ``cls`` rather than the hard-coded class name so that
        # subclasses of this model build instances of themselves.
        return cls(generator, discriminator)


@register_model_architecture('fairseqrl', 'fairseqrl')
def base_lm_architecture_bpe(args):
    """Fill any hyperparameters not supplied on ``args`` with base defaults."""

    def fill(name, value):
        # Keep a caller-supplied value if present, otherwise install the default.
        setattr(args, name, getattr(args, name, value))

    fill('decoder_embed_dim', 512)
    fill('decoder_ffn_embed_dim', 2048)
    fill('decoder_layers', 6)
    fill('decoder_attention_heads', 8)
    fill('adaptive_softmax_cutoff', None)
    fill('adaptive_softmax_dropout', 0)
    fill('adaptive_softmax_factor', 4)
    fill('decoder_learned_pos', False)
    fill('activation_fn', 'relu')

    fill('add_bos_token', False)
    fill('character_embeddings', False)

    # Output/input dims default to whatever embed dim ended up as (set above).
    fill('decoder_output_dim', args.decoder_embed_dim)
    fill('decoder_input_dim', args.decoder_embed_dim)

    # The model training is not stable without this
    args.decoder_normalize_before = True

    fill('adaptive_input', False)
    fill('adaptive_input_factor', 4)
    fill('adaptive_input_cutoff', None)

    fill('tie_adaptive_weights', False)
    fill('tie_adaptive_proj', False)


@register_model_architecture('fairseqrl', 'fairseqrl_gpt')
def transformer_lm_gpt_bpe(args):
    """GPT-sized preset (12 layers, 768-dim) layered on the base BPE LM."""
    overridable = [
        ('decoder_embed_dim', 768),
        ('decoder_ffn_embed_dim', 3072),
        ('decoder_layers', 12),
        ('decoder_attention_heads', 12),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
        ('decoder_final_norm', True),
        ('activation_fn', 'gelu'),
    ]
    for key, default in overridable:
        # Respect values the caller already set on ``args``.
        setattr(args, key, getattr(args, key, default))
    # This preset always ties input and output embeddings.
    args.share_decoder_input_output_embed = True
    base_lm_architecture_bpe(args)


@register_model_architecture('fairseqrl', 'fairseqrl_gpt_format')
def transformer_lm_gpt_bpe_format(args):
    """GPT-sized preset with learned positions, pre-norm, and a final norm."""
    overridable = [
        ('decoder_embed_dim', 768),
        ('decoder_ffn_embed_dim', 3072),
        ('decoder_layers', 12),
        ('decoder_attention_heads', 12),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
        ('activation_fn', 'gelu_accurate'),
    ]
    for key, default in overridable:
        # Respect values the caller already set on ``args``.
        setattr(args, key, getattr(args, key, default))
    # Hard requirements of this preset — not overridable from the CLI.
    args.share_decoder_input_output_embed = True
    args.decoder_normalize_before = True
    args.decoder_final_norm = True
    args.decoder_learned_pos = True
    args.decoder_embed_scale = False
    base_lm_architecture_bpe(args)