import os
import json
import copy
import math
from collections import OrderedDict
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F

from utils.tools import get_mask_from_lengths, pad

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class ImprovedVarianceAdaptor(nn.Module):
    """Variance adaptor (FastSpeech2-style): predicts duration, pitch and
    energy, and adds quantized pitch/energy embeddings to the encoder output.

    Pitch and energy may be applied at phoneme level (before length
    regulation) or frame level (after), per the preprocessing config.
    """

    def __init__(self, preprocess_config, model_config, use_cached_bins=True):
        """
        Args:
            preprocess_config: dict providing ``preprocessing.pitch.feature``,
                ``preprocessing.energy.feature`` and ``path.preprocessed_path``.
            model_config: dict providing ``transformer``, ``variance_embedding``
                and ``variance_predictor`` settings.
            use_cached_bins: if True, read pitch/energy min/max from
                ``stats.json`` under the preprocessed path; otherwise use
                hard-coded fallback ranges.
        """
        super(ImprovedVarianceAdaptor, self).__init__()

        # One predictor per variance; all share the same architecture.
        self.duration_predictor = ImprovedVariancePredictor(model_config, output_dim=1)
        self.pitch_predictor = ImprovedVariancePredictor(model_config, output_dim=1)
        self.energy_predictor = ImprovedVariancePredictor(model_config, output_dim=1)
        self.length_regulator = ImprovedLengthRegulator()

        # Either "phoneme_level" or "frame_level" for each feature.
        self.pitch_feature_level = preprocess_config["preprocessing"]["pitch"]["feature"]
        self.energy_feature_level = preprocess_config["preprocessing"]["energy"]["feature"]

        # Validate feature levels (also guarantees the forward pass always
        # binds pitch_prediction / energy_prediction before returning them).
        valid_levels = ["phoneme_level", "frame_level"]
        assert self.pitch_feature_level in valid_levels, f"Invalid pitch feature level: {self.pitch_feature_level}"
        assert self.energy_feature_level in valid_levels, f"Invalid energy feature level: {self.energy_feature_level}"

        # Initialize embeddings and quantization bins
        self._initialize_embeddings_and_bins(preprocess_config, model_config, use_cached_bins)

        # Learnable global scales applied on top of the user-supplied
        # p/e/d control factors at inference time.
        self.register_parameter("pitch_control_weight", nn.Parameter(torch.ones(1)))
        self.register_parameter("energy_control_weight", nn.Parameter(torch.ones(1)))
        self.register_parameter("duration_control_weight", nn.Parameter(torch.ones(1)))

    def _initialize_embeddings_and_bins(self, preprocess_config, model_config, use_cached_bins):
        """Build quantization bins and pitch/energy embedding tables."""

        pitch_quantization = model_config["variance_embedding"]["pitch_quantization"]
        energy_quantization = model_config["variance_embedding"]["energy_quantization"]
        n_bins = model_config["variance_embedding"]["n_bins"]
        hidden_dim = model_config["transformer"]["encoder_hidden"]

        valid_quant = ["linear", "log"]
        assert pitch_quantization in valid_quant, f"Invalid pitch quantization: {pitch_quantization}"
        assert energy_quantization in valid_quant, f"Invalid energy quantization: {energy_quantization}"

        # stats.json is expected to hold at least [min, max] per feature;
        # only the first two entries are read.
        stats_path = os.path.join(preprocess_config["path"]["preprocessed_path"], "stats.json")

        if use_cached_bins and os.path.exists(stats_path):
            with open(stats_path) as f:
                stats = json.load(f)
                pitch_min, pitch_max = stats["pitch"][:2]
                energy_min, energy_max = stats["energy"][:2]
        else:
            # Stats unavailable (or caching disabled): rough defaults.
            pitch_min, pitch_max = 80.0, 400.0
            energy_min, energy_max = 0.0, 1.0
            print("Warning: Using fallback statistics for pitch/energy bins")

        self.pitch_bins = self._create_bins(pitch_min, pitch_max, n_bins, pitch_quantization)
        self.energy_bins = self._create_bins(energy_min, energy_max, n_bins, energy_quantization)

        self.pitch_embedding = nn.Embedding(n_bins, hidden_dim)
        self.energy_embedding = nn.Embedding(n_bins, hidden_dim)

        # Small-std init keeps the additive embeddings from dominating early.
        nn.init.normal_(self.pitch_embedding.weight, std=0.1)
        nn.init.normal_(self.energy_embedding.weight, std=0.1)

    def _create_bins(self, min_val: float, max_val: float, n_bins: int, quantization: str) -> nn.Parameter:
        """Create ``n_bins - 1`` boundary values for ``torch.bucketize``.

        Bug fix: the positivity clamp is applied only for log quantization.
        Linear bins must keep the true minimum, which is often negative for
        normalized pitch/energy statistics.
        """
        eps = 1e-8

        if quantization == "log":
            # Log spacing needs strictly positive, non-degenerate bounds.
            min_val = max(min_val, eps)
            max_val = max(max_val, min_val + eps)
            bins = torch.exp(torch.linspace(math.log(min_val), math.log(max_val), n_bins - 1))
        else:
            # Only guard against a degenerate (empty) range.
            max_val = max(max_val, min_val + eps)
            bins = torch.linspace(min_val, max_val, n_bins - 1)

        # Frozen Parameter: moves with the module (device/state_dict) but is
        # never updated by the optimizer.
        return nn.Parameter(bins, requires_grad=False)

    def get_pitch_embedding(self, x: torch.Tensor, target: Optional[torch.Tensor],
                            mask: torch.Tensor, control: float) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predict pitch; embed the target (training) or the controlled
        prediction (inference). Returns (prediction, embedding)."""

        prediction = self.pitch_predictor(x, mask)

        if target is not None:
            # Teacher forcing: embed ground-truth pitch.
            embedding = self.pitch_embedding(torch.bucketize(target, self.pitch_bins))
        else:
            # Inference: scale by user control and the learnable weight.
            controlled_prediction = prediction * control * self.pitch_control_weight
            embedding = self.pitch_embedding(torch.bucketize(controlled_prediction, self.pitch_bins))

        return prediction, embedding

    def get_energy_embedding(self, x: torch.Tensor, target: Optional[torch.Tensor],
                             mask: torch.Tensor, control: float) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predict energy; embed the target (training) or the controlled
        prediction (inference). Returns (prediction, embedding)."""

        prediction = self.energy_predictor(x, mask)

        if target is not None:
            # Teacher forcing: embed ground-truth energy.
            embedding = self.energy_embedding(torch.bucketize(target, self.energy_bins))
        else:
            # Inference: scale by user control and the learnable weight.
            controlled_prediction = prediction * control * self.energy_control_weight
            embedding = self.energy_embedding(torch.bucketize(controlled_prediction, self.energy_bins))

        return prediction, embedding

    def forward(self, x: torch.Tensor, src_mask: torch.Tensor,
                mel_mask: Optional[torch.Tensor] = None,
                max_len: Optional[int] = None,
                pitch_target: Optional[torch.Tensor] = None,
                energy_target: Optional[torch.Tensor] = None,
                duration_target: Optional[torch.Tensor] = None,
                p_control: float = 1.0,
                e_control: float = 1.0,
                d_control: float = 1.0) -> Tuple[torch.Tensor, ...]:
        """Apply variance prediction and length regulation.

        Returns (x, pitch_prediction, energy_prediction,
        log_duration_prediction, duration_rounded, mel_len, mel_mask).
        """

        # Duration predictor outputs log(duration + 1).
        log_duration_prediction = self.duration_predictor(x, src_mask)

        # Phoneme-level pitch/energy are added BEFORE length regulation.
        if self.pitch_feature_level == "phoneme_level":
            pitch_prediction, pitch_embedding = self.get_pitch_embedding(
                x, pitch_target, src_mask, p_control
            )
            x = x + pitch_embedding

        if self.energy_feature_level == "phoneme_level":
            energy_prediction, energy_embedding = self.get_energy_embedding(
                x, energy_target, src_mask, e_control
            )
            x = x + energy_embedding

        # Length regulation
        if duration_target is not None:
            # Teacher forcing: expand with ground-truth durations.
            x, mel_len = self.length_regulator(x, duration_target, max_len)
            duration_rounded = duration_target
        else:
            # Bug fix: round AFTER applying the control factors so the
            # scaled durations are integral frame counts (previously the
            # rounding happened before scaling, yielding fractional values).
            duration_rounded = torch.clamp(
                torch.round(
                    (torch.exp(log_duration_prediction) - 1)
                    * d_control
                    * self.duration_control_weight
                ),
                min=0,
            )
            x, mel_len = self.length_regulator(x, duration_rounded, max_len)
            mel_mask = get_mask_from_lengths(mel_len)

        # Frame-level pitch/energy are added AFTER length regulation.
        if self.pitch_feature_level == "frame_level":
            pitch_prediction, pitch_embedding = self.get_pitch_embedding(
                x, pitch_target, mel_mask, p_control
            )
            x = x + pitch_embedding

        if self.energy_feature_level == "frame_level":
            energy_prediction, energy_embedding = self.get_energy_embedding(
                x, energy_target, mel_mask, e_control
            )
            x = x + energy_embedding

        return (
            x,
            pitch_prediction,
            energy_prediction,
            log_duration_prediction,
            duration_rounded,
            mel_len,
            mel_mask,
        )


class ImprovedLengthRegulator(nn.Module):
    """Length regulator: repeats each input frame by its duration.

    Expands a (batch, src_len, dim) sequence to (batch, max_len, dim) by
    repeating position i of sequence b ``duration[b, i]`` times; shorter
    results are zero-padded on the right.
    """

    def __init__(self):
        super(ImprovedLengthRegulator, self).__init__()

    def forward(self, x: torch.Tensor, duration: torch.Tensor, max_len: Optional[int] = None) -> Tuple[
        torch.Tensor, torch.Tensor]:
        """Expand ``x`` along time according to ``duration``.

        Args:
            x: (batch, src_len, dim) input features.
            duration: (batch, src_len) non-negative repeat counts
                (floats are truncated toward zero).
            max_len: optional fixed output length; expansions are truncated
                to it and reported lengths clamped accordingly.

        Returns:
            (output, mel_lengths): the expanded (batch, max_len, dim) tensor
            and the per-sequence expanded lengths.
        """

        batch_size = x.size(0)
        device = x.device

        # Total expanded length per sequence.
        mel_lengths = duration.sum(dim=1).long()

        if max_len is None:
            max_len = int(mel_lengths.max().item())
        else:
            # Bug fix: when the output is truncated to max_len, report the
            # truncated lengths so masks built from them (e.g. by
            # get_mask_from_lengths) agree with the returned tensor.
            mel_lengths = mel_lengths.clamp(max=max_len)

        # Pre-allocate output; positions beyond a sequence's length stay zero.
        output = torch.zeros(batch_size, max_len, x.size(-1), device=device, dtype=x.dtype)

        for b in range(batch_size):
            # Source index i repeated duration[b, i] times, truncated to max_len.
            expanded_indices = torch.repeat_interleave(
                torch.arange(duration.size(1), device=device),
                duration[b].long()
            )[:max_len]

            output[b, :len(expanded_indices)] = x[b, expanded_indices]

        return output, mel_lengths


class ImprovedVariancePredictor(nn.Module):
    """Variance predictor: a small conv stack followed by a projection head.

    Maps (batch, time, encoder_hidden) features to per-position scalar
    predictions (or ``output_dim``-vectors), zeroing masked positions.
    """

    def __init__(self, model_config, output_dim=1):
        super(ImprovedVariancePredictor, self).__init__()

        self.input_size = model_config["transformer"]["encoder_hidden"]
        self.filter_size = model_config["variance_predictor"]["filter_size"]
        self.kernel = model_config["variance_predictor"]["kernel_size"]
        self.dropout = model_config["variance_predictor"]["dropout"]
        self.output_dim = output_dim

        # Two conv blocks; only the second keeps a residual connection.
        blocks = []
        in_dim = self.input_size
        for idx in range(2):
            blocks.append(
                ConvBlock(
                    in_dim,
                    self.filter_size,
                    kernel_size=self.kernel,
                    dropout=self.dropout,
                    use_residual=idx > 0,
                )
            )
            in_dim = self.filter_size
        self.conv_layers = nn.ModuleList(blocks)

        # Two-layer projection head narrowing to output_dim.
        self.output_projection = nn.Sequential(
            nn.Linear(self.filter_size, self.filter_size // 2),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.filter_size // 2, output_dim),
        )

    def forward(self, encoder_output: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Predict variances for each position; masked positions become 0."""

        hidden = encoder_output
        for block in self.conv_layers:
            hidden = block(hidden)

        prediction = self.output_projection(hidden)

        # Scalar predictions drop the trailing singleton dimension.
        if self.output_dim == 1:
            prediction = prediction.squeeze(-1)

        if mask is None:
            return prediction

        fill_mask = mask if self.output_dim == 1 else mask.unsqueeze(-1)
        return prediction.masked_fill(fill_mask, 0.0)


class ConvBlock(nn.Module):
    """Conv1d -> LayerNorm -> GELU -> Dropout, with an optional residual.

    Operates on (batch, time, channels) tensors.

    Bug fix: the original set ``use_residual`` to False whenever
    in_channels != out_channels, which made the ``residual_proj`` branch
    unreachable dead code. The projection is now actually created and used,
    so ``use_residual=True`` works across a channel change. (Existing
    callers in this file use equal channels or ``use_residual=False``, so
    their behavior is unchanged.)
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, dropout=0.1, use_residual=True):
        super(ConvBlock, self).__init__()

        self.use_residual = use_residual

        # "Same" padding for odd kernel sizes.
        padding = (kernel_size - 1) // 2

        self.conv = ImprovedConv1d(in_channels, out_channels, kernel_size, padding=padding)
        self.norm = nn.LayerNorm(out_channels)
        self.activation = nn.GELU()  # GELU often works better than ReLU
        self.dropout = nn.Dropout(dropout)

        # Project the residual when the channel count changes.
        if self.use_residual and in_channels != out_channels:
            self.residual_proj = nn.Linear(in_channels, out_channels)
        else:
            self.residual_proj = None

    def forward(self, x):
        """x: (batch, time, in_channels) -> (batch, time, out_channels)."""
        residual = x

        # Main path
        x = self.conv(x)
        x = self.norm(x)
        x = self.activation(x)
        x = self.dropout(x)

        # Residual connection (projected if channel counts differ).
        if self.use_residual:
            if self.residual_proj is not None:
                residual = self.residual_proj(residual)
            x = x + residual

        return x


class ImprovedConv1d(nn.Module):
    """1D convolution over (batch, time, channels) inputs.

    Wraps nn.Conv1d, handling the channel-first transpose internally and
    applying a configurable weight-initialization scheme (bias, when
    present, is zero-initialized).
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=True, w_init="xavier_uniform"):
        super(ImprovedConv1d, self).__init__()

        self.conv = nn.Conv1d(
            in_channels, out_channels, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation, bias=bias
        )

        # Dispatch table instead of an if/elif chain; an unknown scheme
        # keeps nn.Conv1d's default initialization.
        init_schemes = {
            "xavier_uniform": lambda w: nn.init.xavier_uniform_(w),
            "xavier_normal": lambda w: nn.init.xavier_normal_(w),
            "kaiming_uniform": lambda w: nn.init.kaiming_uniform_(w, nonlinearity='relu'),
            "kaiming_normal": lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu'),
        }
        initializer = init_schemes.get(w_init)
        if initializer is not None:
            initializer(self.conv.weight)

        if bias:
            nn.init.constant_(self.conv.bias, 0.0)

    def forward(self, x):
        # nn.Conv1d expects (batch, channels, time); transpose in and out.
        return self.conv(x.transpose(1, 2)).transpose(1, 2)

# Backward-compatibility aliases so existing imports of the original
# class names keep working (PEP 8 spacing applied).
Conv1d = ImprovedConv1d
VariancePredictor = ImprovedVariancePredictor
LengthRegulator = ImprovedLengthRegulator
VarianceAdaptor = ImprovedVarianceAdaptor
