from logging import getLogger
import logging
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.modules import sparse
from utils import clip_parameters, get_optimizer

# Module-level logger for all model/training messages in this file.
logger = logging.getLogger('Models')
logger.setLevel(logging.INFO)

class Discriminator(nn.Module):
    """
    MLP discriminator for adversarial embedding alignment.

    Maps a batch of embeddings (batch, emb_dim) to a per-row probability
    in [0, 1] via `dis_layers` hidden layers of width `dis_hid_dim` with
    LeakyReLU(0.2) activations, dropout, and a final sigmoid.
    """

    def __init__(self, args, emb_dim):
        """
        args: namespace with dis_layers, dis_hid_dim, dis_dropout,
              dis_input_dropout.
        emb_dim: dimensionality of the input embeddings.
        """
        super(Discriminator, self).__init__()

        self.emb_dim = emb_dim
        self.dis_layers = args.dis_layers
        self.dis_hid_dim = args.dis_hid_dim
        self.dis_dropout = args.dis_dropout
        self.dis_input_dropout = args.dis_input_dropout

        # Input dropout, then dis_layers hidden layers plus one output layer.
        modules = [nn.Dropout(self.dis_input_dropout)]
        for layer_idx in range(self.dis_layers + 1):
            fan_in = self.emb_dim if layer_idx == 0 else self.dis_hid_dim
            fan_out = self.dis_hid_dim if layer_idx < self.dis_layers else 1
            modules.append(nn.Linear(fan_in, fan_out))
            if layer_idx != self.dis_layers:
                # Hidden layers get an activation and dropout; the output
                # layer goes straight to the sigmoid below.
                modules.extend([nn.LeakyReLU(0.2), nn.Dropout(self.dis_dropout)])
        modules.append(nn.Sigmoid())
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Return a 1-D tensor of probabilities, one per input row."""
        assert x.dim() == 2 and x.size(1) == self.emb_dim
        return self.layers(x).view(-1)

class Trainer():
    """
    Adversarial trainer (MUSE-style GAN): alternately trains the
    discriminator to separate mapped source embeddings from target
    embeddings (`dis_step`) and trains the mapping to fool it
    (`mapping_step`), with periodic orthogonalization of the mapping
    and an SGD learning-rate schedule.
    """

    def __init__(self, mapping, discriminator, args):
        """
        mapping: nn.Module mapping source embeddings to target space.
        discriminator: Discriminator instance, or None (required to be
            None when args.dis_optimizer is falsy).
        args: namespace with batch/optimizer/schedule hyper-parameters.
        """
        self.batch_size = args.batch_size
        # NOTE(review): dis_most_frequent is stored but never read in this
        # class -- presumably meant to restrict sampling to frequent words.
        self.dis_most_frequent = args.dis_most_frequent
        self.dis_clip_weights = args.dis_clip_weights
        self.dis_lambda = args.dis_lambda
        self.src_dim = args.src_dim
        self.trg_dim = args.trg_dim
        self.map_beta = args.map_beta
        self.lr_decay = args.lr_decay
        self.min_lr = args.min_lr
        self.lr_shrink = args.lr_shrink
        self.mapping = mapping
        self.discriminator = discriminator
        self.device = args.device
        if args.map_optimizer:
            optim_fn, optim_params = get_optimizer(args.map_optimizer)
            self.map_optimizer = optim_fn(mapping.parameters(), **optim_params)
        if args.dis_optimizer:
            optim_fn, optim_params = get_optimizer(args.dis_optimizer)
            self.dis_optimizer = optim_fn(discriminator.parameters(), **optim_params)
        else:
            assert discriminator is None
        # best validation score seen so far (sentinel: effectively -inf)
        self.best_valid_metric = -1e12
        self.decrease_lr = False

    def dis_step(self, src_embedding, trg_embedding, stats):
        """
        One discriminator update: mapped source vectors are labeled 0
        ("fake") and target vectors 1 ("real"). Appends the loss to
        stats['DIS_COSTS'] and clips discriminator weights afterwards.
        """
        self.discriminator.train()
        self.mapping.eval()

        # Sample a batch of rows.
        # NOTE(review): the SAME indices are used for both embeddings, which
        # assumes trg_embedding has at least src_embedding.shape[0] rows and
        # correlates the two samples -- confirm this is intended.
        feed_indices = torch.randint(src_embedding.shape[0], size=(self.batch_size,))
        x = src_embedding[feed_indices]
        z = trg_embedding[feed_indices]
        input_embedding = torch.cat((self.mapping(x), z))
        labels = torch.cat((torch.zeros(x.shape[0]), torch.ones(z.shape[0]))).to(self.device)
        preds = self.discriminator(input_embedding).to(self.device)
        loss = F.binary_cross_entropy(preds, labels)
        # .item() replaces the deprecated .data.item() access.
        stats['DIS_COSTS'].append(loss.item())

        # Abort on NaN loss (torch.isnan replaces the `loss != loss` idiom).
        if torch.isnan(loss).any():
            logger.error("NaN detected (discriminator)")
            exit()

        # optim
        self.dis_optimizer.zero_grad()
        loss.backward()
        self.dis_optimizer.step()
        clip_parameters(self.discriminator, self.dis_clip_weights)

    def mapping_step(self, src_embedding, stats):
        """
        One mapping (generator) update: make the discriminator predict 1
        ("real target") on mapped source vectors. Returns the number of
        processed samples (0 when adversarial training is disabled).
        """
        if self.dis_lambda == 0:
            return 0

        self.discriminator.eval()
        self.mapping.train()

        # loss: fool the discriminator on a random source batch
        feed_indices = torch.randint(src_embedding.shape[0], size=(self.batch_size,))
        x = src_embedding[feed_indices]
        input_embedding = self.mapping(x)
        labels = torch.ones(x.shape[0]).to(self.device)
        preds = self.discriminator(input_embedding).to(self.device)
        loss = F.binary_cross_entropy(preds, labels) * self.dis_lambda

        # Abort on NaN loss.
        if torch.isnan(loss).any():
            logger.error("NaN detected (fool discriminator)")
            exit()

        # optim
        self.map_optimizer.zero_grad()
        loss.backward()
        self.map_optimizer.step()
        self.orthogonalize_mapping()

        return 2 * self.batch_size

    def orthogonalize_mapping(self):
        """
        Orthogonalize the mapping in place:
        W <- (1 + beta) * W - beta * W W^T W  (no-op when map_beta <= 0).
        """
        if self.map_beta > 0:
            W = self.mapping.weight.data
            beta = self.map_beta
            W.copy_((1 + beta) * W - beta * W.mm(W.transpose(0, 1).mm(W)))

    def update_lr(self, to_log, metric):
        """
        Update the mapping learning rate when using SGD: exponential decay
        floored at min_lr, plus an optional shrink step.
        """
        if 'SGD' != type(self.map_optimizer).__name__:
            return
        old_lr = self.map_optimizer.param_groups[0]['lr']
        new_lr = max(self.min_lr, old_lr * self.lr_decay)
        if new_lr < old_lr:
            logger.info("Decreasing learning rate: %.8f -> %.8f" % (old_lr, new_lr))
            self.map_optimizer.param_groups[0]['lr'] = new_lr

        if self.lr_shrink < 1:
            # NOTE(review): the original guard comparing to_log[metric] to
            # self.best_valid_metric was commented out, so this branch runs on
            # every call regardless of the validation metric (to_log/metric
            # are currently unused). Behavior preserved: shrink only from the
            # second call onward.
            if self.decrease_lr:
                old_lr = self.map_optimizer.param_groups[0]['lr']
                self.map_optimizer.param_groups[0]['lr'] *= self.lr_shrink
                logger.info("Shrinking the learning rate: %.5f -> %.5f"
                            % (old_lr, self.map_optimizer.param_groups[0]['lr']))
            self.decrease_lr = True




def build_model(args, with_dis):
    """
    Build the alignment model components.

    Returns a (mapping, discriminator) pair: a Linear map from the source
    embedding space (args.src_dim) to the target space (args.trg_dim) on
    args.device, and a Discriminator on the same device -- or None when
    with_dis is false.
    """
    mapping = nn.Linear(args.src_dim, args.trg_dim, bias=True).to(args.device)

    if with_dis:
        discriminator = Discriminator(args, args.trg_dim).to(args.device)
    else:
        discriminator = None

    return mapping, discriminator