import torch
from torch import nn
from typing import List
from varflow.distributions import Distribution, ConditionalCategorical
from varflow.flows import FlowLayer


class Collector:
    """Recording sink used in place of a flow's base distribution.

    Flows wired to a ``Collector`` push their latent tensors into ``z``
    instead of scoring them, so an outer module can score all latents
    jointly against a shared prior.  ``samples`` can be pre-loaded so a
    flow's ``sample`` call pulls injected draws back out.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all recorded latents and any injected samples."""
        self.z = []
        self.samples = None

    def log_prob(self, z):
        """Record ``z`` and report a zero log-density per batch element."""
        self.z.append(z)
        return z.new_zeros(len(z))

    def log_prob_with_sll(self, z):
        """Record ``z``; report zero log-densities and a zero surrogate loss."""
        self.z.append(z)
        zeros = z.new_zeros(len(z))
        return zeros, 0

    def sample(self, z):
        """Ignore the request and hand back whatever samples were injected."""
        return self.samples

class MultiStream(FlowLayer):
    """A bank of flows sharing a single base distribution.

    Each flow is called once with an internal ``Collector`` so its
    base-distribution queries are routed through the collector; the
    collector records the flows' latent outputs, letting this layer score
    every stream jointly against the shared prior ``self.base_dist``.

    NOTE(review): ``self.base_dist`` is never assigned in this class —
    presumably supplied by ``FlowLayer`` or attached externally; confirm.
    """

    def __init__(self, flows: List[FlowLayer], sampler: ConditionalCategorical):
        """
        Args:
            flows: component flows; ``flow(self.collector)`` wires each one
                to the shared collector.
            sampler: conditional categorical q(k | x) used to pick a stream
                per input during training.
        """
        super(MultiStream, self).__init__()
        self.sampler = sampler
        self.collector = Collector()
        self.flows = nn.ModuleList(flows)
        for flow in self.flows:
            flow(self.collector)
        # Mixture weights over streams, initialized uniform.  A buffer (not
        # a parameter): updated by an EMA in log_prob_with_sll, not by SGD.
        self.register_buffer('alpha', torch.ones(len(flows)) / len(flows))

    def log_prob(self, x):
        """Return log p(x) = log sum_k alpha_k p_k(x) for a batch ``x``.

        Fixed to use ``torch.logsumexp`` instead of the original
        exp/sum/log form, which underflows to -inf for strongly negative
        per-stream log-densities.
        """
        # Scoring each flow pushes its latent into the collector as a side
        # effect; torch.cat below relies on that ordering matching the flow
        # iteration order.
        flow_log_prob = torch.cat([flow.log_prob(x) for flow in self.flows])
        log_prob = self.base_dist.log_prob(torch.cat(self.collector.z)) + flow_log_prob
        # (num_flows * batch,) -> num_flows chunks -> (batch, num_flows)
        per_stream = torch.stack(torch.chunk(log_prob, len(self.flows), 0), -1)

        self.collector.reset()
        # log sum_k alpha_k exp(lp_k) == logsumexp_k(log alpha_k + lp_k)
        return torch.logsumexp(self.alpha.log() + per_stream, -1)

    def pq_kl(self, q_dist):
        """KL(q(k|x) || alpha) = sum_k q_k (log q_k - log alpha_k), per sample."""
        p_log_p = (q_dist.logits - self.alpha.log()) * q_dist.probs
        return p_log_p.sum(-1)

    def log_prob_with_sll(self, x):
        """Training objective with a score-function surrogate term.

        Returns:
            ``(log_prob - kld, sll)``: per-sample selected-stream
            log-density minus KL(q || alpha), and the accumulated surrogate
            loss ``sll`` (including a REINFORCE term for the discrete
            stream selection).
        """
        q_dist = self.sampler.cond_dist(context=x)
        with torch.no_grad():
            # EMA update of the marginal stream usage; no gradient flows.
            self.alpha = 0.99 * self.alpha + 0.01 * q_dist.probs.mean(0)

        selection = q_dist.sample()
        kld = self.pq_kl(q_dist)
        log_prob = torch.zeros_like(kld)
        sll = 0
        for i, flow in enumerate(self.flows):
            mask = selection == i
            _log_prob, _sll = flow.log_prob_with_sll(x[mask])
            log_prob[mask] = _log_prob
            sll += _sll
        # Score all collected latents against the shared prior in one call;
        # the collector holds them in flow-iteration order.
        _log_prob, _sll = self.base_dist.log_prob_with_sll(torch.cat(self.collector.z))
        sll += _sll
        for i, flow in enumerate(self.flows):
            mask = selection == i
            n = mask.sum()
            # Peel off this flow's slice of the prior log-densities.
            log_prob[mask] += _log_prob[:n]
            _log_prob = _log_prob[n:]

        # REINFORCE / score-function surrogate for the discrete selection.
        sll += torch.mean(q_dist.log_prob(selection) * log_prob.detach(), 0)
        self.collector.reset()
        return log_prob - kld, sll

    def sample(self, num_samples):
        """Ancestral sampling: prior draws -> stream assignment -> flow transform."""
        base_samples = self.base_dist.sample(num_samples)
        samples = []
        selection = torch.distributions.Categorical(probs=self.alpha).sample((num_samples,))
        for i, flow in enumerate(self.flows):
            mask = selection == i
            # Inject this stream's share of prior draws; flow.sample pulls
            # them back out through the collector.
            self.collector.samples = base_samples[mask]
            samples.append(flow.sample(mask.sum()))
        self.collector.reset()
        return torch.cat(samples)


class Mixture(FlowLayer):
    """A mixture of component flows with EMA-estimated weights ``alpha``."""

    def __init__(self, flows: List[FlowLayer], sampler: ConditionalCategorical):
        """
        Args:
            flows: the mixture components.
            sampler: conditional categorical q(k | x) used to pick a
                component per input during training.
        """
        super(Mixture, self).__init__()
        self.flows = nn.ModuleList(flows)
        self.sampler = sampler
        # Mixture weights, initialized uniform.  A buffer (not a parameter):
        # updated by an EMA in log_prob_with_sll, not by the optimizer.
        self.register_buffer('alpha', torch.ones(len(flows)) / len(flows))

    def log_prob(self, x):
        """Return log p(x) = log sum_k alpha_k p_k(x) for a batch ``x``.

        Fixed to use ``torch.logsumexp`` instead of the original
        exp/sum/log form, which underflows to -inf for strongly negative
        component log-densities.
        """
        log_probs = torch.stack([dist.log_prob(x) for dist in self.flows], dim=-1)
        # log sum_k alpha_k exp(lp_k) == logsumexp_k(log alpha_k + lp_k)
        return torch.logsumexp(self.alpha.log() + log_probs, -1)

    def pq_kl(self, q_dist):
        """KL(q(k|x) || alpha) = sum_k q_k (log q_k - log alpha_k), per sample."""
        p_log_p = (q_dist.logits - self.alpha.log()) * q_dist.probs
        return p_log_p.sum(-1)

    def log_prob_with_sll(self, x):
        """Training objective with a score-function surrogate term.

        Returns:
            ``(log_prob - kld, sll)``: per-sample selected-component
            log-density minus KL(q || alpha), plus the accumulated
            surrogate loss ``sll``.
        """
        q_dist = self.sampler.cond_dist(context=x)
        with torch.no_grad():
            # EMA update of the marginal component usage; no gradient flows.
            self.alpha = 0.99 * self.alpha + 0.01 * q_dist.probs.mean(0)

        selection = q_dist.sample()
        kld = self.pq_kl(q_dist)
        log_prob = torch.zeros_like(kld)
        sll = 0
        for i, dist in enumerate(self.flows):
            mask = selection == i
            _log_prob, _sll = dist.log_prob_with_sll(x[mask])
            log_prob[mask] = _log_prob
            # Weight each component's surrogate by its share of the batch.
            sll += _sll * mask.sum() / len(x)
        # REINFORCE / score-function surrogate for the discrete selection.
        sll += torch.mean(q_dist.log_prob(selection) * log_prob.detach(), 0)
        return log_prob - kld, sll

    def sample(self, num_samples):
        """Ancestral sampling: draw component indices from ``alpha``, then
        draw the matching number of samples from each component.

        Fixed: the original passed ``torch.sum(selection)`` — the sum of
        the selected *indices* — as every component's sample count; it must
        be the number of draws assigned to component ``i`` (compare
        ``mask.sum()`` in ``MultiStream.sample``).
        """
        selection = torch.distributions.Categorical(probs=self.alpha).sample((num_samples,))
        z = []
        for i, dist in enumerate(self.flows):
            count = (selection == i).sum()
            z.append(dist.sample(count))
        return torch.cat(z)
