import math
import stat
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, TypeAlias, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim.adamw

def log_softmax(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    """Numerically stable log-softmax along `dim`.

    Subtracts the per-slice maximum before exponentiating so exp() never
    overflows, then applies the log-sum-exp trick on the shifted values.

    Args:
        x: Tensor of unnormalized logits.
        dim: Dimension to normalize over (default 1, the class dimension
            of a (batch, vocab) tensor).

    Returns:
        Tensor of the same shape as `x` containing log-probabilities.
    """
    max_x, _ = x.max(dim=dim, keepdim=True)
    shifted = x - max_x  # compute the shift once; reused below
    log_sum_exp = shifted.exp().sum(dim=dim, keepdim=True).log()
    return shifted - log_sum_exp


# TODO: 4.1 Loss: Cross-Entropy
class CrossEntropyLoss(nn.Module):
    """Mean cross-entropy loss computed from unnormalized logits.

    Accepts inputs of shape (..., vocab_size): the class dimension is
    always the last one, so both (batch, vocab) and (batch, seq, vocab)
    logits are supported.
    """

    def forward(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Given a tensor of inputs and targets, compute the average cross-entropy
        loss across examples.

        Args:
            inputs (Float[Tensor, "... vocab_size"]): inputs[..., j] is the
                unnormalized logit of the jth class.
            targets (Int[Tensor, "..."]): index of the correct class per example.
                Each value must be between 0 and `num_classes - 1`.

        Returns:
            Float[Tensor, ""]: The average cross-entropy loss across examples.
        """
        # logsumexp shifts by the max internally, so log p = x - logsumexp(x)
        # is numerically stable even for very large logits.
        log_probs = inputs - torch.logsumexp(inputs, dim=-1, keepdim=True)
        # Pick out the log-probability of the correct class for each example.
        target_log_probs = log_probs.gather(dim=-1, index=targets.unsqueeze(-1)).squeeze(-1)
        return -target_log_probs.mean()


# TODO: 4.2 SGD
ParamsT: TypeAlias = Union[
    Iterable[torch.Tensor], Iterable[Dict[str, Any]], Iterable[Tuple[str, torch.Tensor]]
]
class SGD(torch.optim.Optimizer):
    def __init__(self, params: ParamsT, lr=1e-3):
        self.lr = lr
        defaults = {'lr':self.lr}
        super(SGD, self).__init__(params, defaults)
        
    def step(self, closure: Optional[Callable] = None):
        loss = None if closure is None else closure()
        for group in self.param_groups:
            lr = group['lr']
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p] # Get state associated with p. 
                t = state.get("t", 0) # Get iteration number from the state, or initial value. 
                grad = p.grad.data # Get the gradient of loss with respect to p.  
                p.data -= lr / math.sqrt(t + 1) * grad # Update weight tensor in-place. 
                state["t"] = t + 1 # Increment iteration number.
        return loss
    

# TODO: 4.3 AdamW
class AdamW(torch.optim.Optimizer):
    """AdamW: Adam with decoupled weight decay (Loshchilov & Hutter, 2019).

    Weight decay is applied directly to the weights after the adaptive
    update (p <- p - lr * wd * p), not folded into the gradient as in
    classic Adam + L2 regularization.
    """

    def __init__(self,
        params: "ParamsT",
        lr: Union[float, torch.Tensor] = 1e-3,
        betas: Tuple[Union[float, torch.Tensor], Union[float, torch.Tensor]] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2
    ):
        """
        Args:
            params: Parameters (or parameter groups) to optimize.
            lr: Learning rate.
            betas: Exponential decay rates (beta1, beta2) for the first and
                second gradient-moment estimates.
            eps: Term added to the denominator for numerical stability.
            weight_decay: Decoupled weight-decay coefficient.
        """
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
        )
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure: Optional[Callable] = None):
        """Perform a single AdamW step.

        Args:
            closure: Optional callable re-evaluating the model; executed
                with grad re-enabled so it can backpropagate.

        Returns:
            The closure's loss, or None if no closure was given.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():  # step() itself runs under no_grad
                loss = closure()
        for group in self.param_groups:
            # Read hyperparameters for this group.
            lr = group['lr']
            beta1, beta2 = group['betas']
            weight_decay = group['weight_decay']
            eps = group['eps']

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients')

                state = self.state[p]
                if len(state) == 0:
                    # Lazy state init; key names match the original code so
                    # previously saved optimizer checkpoints keep loading.
                    state['t'] = 0
                    state['momentum'] = torch.zeros_like(p)
                    state['velocity'] = torch.zeros_like(p)

                m, v = state['momentum'], state['velocity']
                t = state['t'] + 1

                # Exponential moving averages of grad and grad^2, updated
                # in place (no fresh state tensors allocated every step).
                m.mul_(beta1).add_(grad * (1 - beta1))
                v.mul_(beta2).add_(grad * grad * (1 - beta2))

                # Bias-corrected step size. `**` works for float or tensor
                # betas; np.pow did not exist before numpy 2.0.
                lr_t = lr * (1 - beta2 ** t) ** 0.5 / (1 - beta1 ** t)

                p.sub_(lr_t * m / (v.sqrt() + eps))
                # Decoupled weight decay, applied after the adaptive update
                # exactly as the original did: p <- p - lr * wd * p.
                if weight_decay != 0:
                    p.mul_(1 - lr * weight_decay)

                state['t'] = t
        return loss

# TODO: 4.4 Learning rate scheduling
def get_lr_cosine_schedule(
    it: int,
    max_learning_rate: float,
    min_learning_rate: float,
    warmup_iters: int,
    cosine_cycle_iters: int,
) -> float:
    """
    Given the parameters of a cosine learning rate decay schedule (with linear
    warmup) and an iteration number, return the learning rate at the given
    iteration under the specified schedule.

    Args:
        it (int): Iteration number to get learning rate for.
        max_learning_rate (float): alpha_max, the maximum learning rate for
            cosine learning rate schedule (with warmup).
        min_learning_rate (float): alpha_min, the minimum / final learning rate for
            the cosine learning rate schedule (with warmup).
        warmup_iters (int): T_w, the number of iterations to linearly warm-up
            the learning rate.
        cosine_cycle_iters (int): T_c, the number of cosine annealing iterations.

    Returns:
        Learning rate at the given iteration under the specified schedule.
    """
    if it < warmup_iters:
        # Linear warmup from 0 to alpha_max over T_w iterations.
        return max_learning_rate * it / warmup_iters
    if it > cosine_cycle_iters:
        # Past the cosine cycle: hold at alpha_min.
        return min_learning_rate
    if cosine_cycle_iters == warmup_iters:
        # Degenerate zero-length cycle: warmup is complete, so return
        # alpha_max instead of dividing 0 by 0 below.
        return max_learning_rate
    # Cosine anneal from alpha_max (progress 0) down to alpha_min (progress 1).
    progress = (it - warmup_iters) / (cosine_cycle_iters - warmup_iters)
    cosine_factor = 0.5 * (1.0 + math.cos(math.pi * progress))
    return min_learning_rate + cosine_factor * (max_learning_rate - min_learning_rate)

# TODO: Training loop:

# TODO: 4.5 Gradient clipping
def gradient_clipping(
    parameters: Iterable[torch.nn.Parameter], max_l2_norm: float, eps: float = 1e-6
) -> None:
    """Given a set of parameters, clip their combined gradients to have l2 norm at most max_l2_norm.

    Args:
        parameters (Iterable[torch.nn.Parameter]): collection of trainable parameters.
        max_l2_norm (float): a positive value containing the maximum l2-norm.
        eps (float): small constant added to the denominator for numerical
            stability (same 1e-6 default as torch.nn.utils.clip_grad_norm_).

    The gradients of the parameters (parameter.grad) should be modified in-place.
    """
    # Materialize once: `parameters` may be a generator, and iterating it a
    # second time for the scaling pass would see nothing.
    grads = [p.grad for p in parameters if p.grad is not None]
    if not grads:
        return
    # Combined l2 norm over ALL gradients: norm of the per-tensor norms.
    # (Stacking the raw gradients would require identical shapes.)
    total_norm = torch.norm(torch.stack([torch.norm(g.detach(), 2) for g in grads]), 2)
    if total_norm > max_l2_norm:
        scale = max_l2_norm / (total_norm + eps)
        for g in grads:
            g.detach().mul_(scale)  # in-place, outside the autograd graph
        

# TODO: 5 Training loop

# TODO: 5.1 Data Loader

# TODO: 5.2 Checkpointing

# TODO: 5.3 Training loop

# TODO: 6 Generating text

# TODO: 7 Experiments
