import torch
from torch.optim.optimizer import Optimizer
import torch
import torch.nn as nn
import time
import time
from torch.optim import Adam

class Lion(Optimizer):
  r"""Implements the Lion optimizer (EvoLved Sign Momentum).

  Lion keeps a single exponential moving average of the gradient and steps
  each parameter by the *sign* of an interpolated momentum term, so the
  update magnitude is uniform across coordinates.
  """

  def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):
    """Validate hyperparameters and set up parameter groups.

    Args:
      params (iterable): iterable of parameters to optimize or dicts
        defining parameter groups.
      lr (float, optional): learning rate (default: 1e-4).
      betas (Tuple[float, float], optional): interpolation coefficient and
        momentum decay coefficient (default: (0.9, 0.99)).
      weight_decay (float, optional): weight decay coefficient (default: 0).

    Raises:
      ValueError: if ``lr`` is negative or either beta lies outside [0, 1).
    """
    # `not lr >= 0.0` (rather than `lr < 0.0`) also rejects NaN.
    if not lr >= 0.0:
      raise ValueError('Invalid learning rate: {}'.format(lr))
    if not 0.0 <= betas[0] < 1.0:
      raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
    if not 0.0 <= betas[1] < 1.0:
      raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
    super().__init__(params, dict(lr=lr, betas=betas, weight_decay=weight_decay))

  @torch.no_grad()
  def step(self, closure=None):
    """Perform a single optimization step.

    Args:
      closure (callable, optional): a closure that reevaluates the model
        and returns the loss.

    Returns:
      The loss returned by ``closure``, or ``None`` when no closure is given.
    """
    loss = None
    if closure is not None:
      with torch.enable_grad():
        loss = closure()

    for group in self.param_groups:
      # Hoist group hyperparameters out of the parameter loop.
      lr = group['lr']
      wd = group['weight_decay']
      beta1, beta2 = group['betas']

      for param in group['params']:
        grad = param.grad
        if grad is None:
          continue

        # Decoupled (stepweight) decay, applied directly to the weights.
        param.data.mul_(1 - lr * wd)

        state = self.state[param]
        if not state:
          # Lazily create the momentum buffer on first use.
          state['exp_avg'] = torch.zeros_like(param)
        momentum = state['exp_avg']

        # Interpolate momentum and gradient, then step by its sign.
        direction = torch.sign(momentum * beta1 + grad * (1 - beta1))
        param.add_(direction, alpha=-lr)

        # Decay the momentum running average toward the new gradient.
        momentum.mul_(beta2).add_(grad, alpha=1 - beta2)

    return loss
# Define the loss function
def loss(f, target=None):
    """Mean-squared loss of ``f``, optionally against a target.

    Args:
        f: Tensor of residuals/predictions; the leading dimension is
            treated as the batch dimension.
        target: ``None`` to penalize ``f`` toward zero; a scalar number to
            penalize ``f - target``; otherwise a tensor compared with
            ``nn.MSELoss``.

    Returns:
        A scalar tensor. For ``None``/scalar targets this is the sum of
        squares divided by the batch size (``f.shape[0]``); for tensor
        targets it is the element-wise mean squared error.
    """
    if target is None:
        return torch.sum(torch.square(f)) / f.shape[0]
    # BUGFIX: accept plain ints as well as floats -- an int target (e.g. 0)
    # previously fell through to nn.MSELoss with a non-tensor argument and
    # raised. bool is excluded: it is an int subclass but surely a mistake.
    if isinstance(target, (int, float)) and not isinstance(target, bool):
        return torch.sum(torch.square(f - target)) / f.shape[0]
    return nn.MSELoss()(f, target)

# Define the entropy-term regularization
def entropy_regularization(params):
    """Entropy-style regularizer: the sum over parameters of mean(p ** 2).

    Returns 0.0 for an empty parameter list, otherwise a scalar tensor.
    """
    return sum((torch.mean(q ** 2) for q in params), 0.0)

# Define the energy-field function
def energy_field(params):
    """Potential-energy proxy: total L1 norm of every available gradient.

    Parameters whose ``.grad`` is ``None`` contribute nothing; an empty
    or gradient-free list yields 0.0.
    """
    total = 0.0
    for q in params:
        g = q.grad
        if g is None:
            continue
        total = total + g.abs().sum()
    return total
import torch
import time
from torch.optim import Adam

# Assumes the Lion optimizer and the loss, entropy_regularization, energy_field, etc. functions are already defined

def train(net, PDE, BC, point_sets, flags, iterations=50000, lr=5e-4, info_num=100,
          test_in=None, test_out=None, w=(1., 1., 1., 1.), inv_params=None,
          entropy_weight_init=0.1, energy_weight=0.1, warmup_steps=5000,
          beta1=0.9, beta2=0.99, weight_decay=0.0):
    """
    Train the model using a combination of Adam and Lion optimizers.

    The optimizer starts with Adam for the first `warmup_steps` iterations,
    then switches to Lion.

    Args:
        net: The neural network model.
        PDE: Function computing the PDE residual at domain points.
        BC: Function computing the boundary-condition residual.
        point_sets: List of point tensors, one per data category.
        flags: Per-point-set tuples; flag[0] is the category ('BC', 'IC',
            'domain' or 'data') and flag[1] the associated target/argument.
        iterations: Total number of training iterations.
        lr: Learning rate (shared by Adam and Lion).
        info_num: How often (in iterations) to print progress information.
        test_in, test_out: Optional test data for evaluating the model.
        w: Weights for the BC / IC / PDE / data loss components.
        inv_params: Extra trainable tensors for inverse problems, or None.
        entropy_weight_init: Initial entropy weight; decays linearly to zero.
        energy_weight: Weight for the (detached) gradient-energy term.
        warmup_steps: Number of Adam iterations before switching to Lion.
        beta1, beta2: Lion momentum coefficients.
        weight_decay: Lion weight-decay coefficient.

    Returns:
        (l_history, err_history, pde_loss_history, bc_loss_history,
        ic_loss_history, data_loss_history); err_history is None when no
        test data was supplied.
    """
    # Avoid the shared-mutable-default pitfall; treat None as "no inverse params".
    if inv_params is None:
        inv_params = []
    params = list(net.parameters()) + list(inv_params)

    # BUGFIX: both optimizers must wrap the SAME parameter list. Previously
    # Lion received only net.parameters(), so inverse-problem parameters
    # silently stopped updating after the warmup switch.
    lion_optimizer = Lion(params, lr=lr, betas=(beta1, beta2), weight_decay=weight_decay)
    adam_optimizer = Adam(params, lr=lr)
    optimizer = adam_optimizer

    # Count points per category so each term is weighted by its batch share.
    n_bc = n_ic = n_PDE = n_data = 0
    for points, flag in zip(point_sets, flags):
        if flag[0] == 'BC': n_bc += points.shape[0]
        if flag[0] == 'IC': n_ic += points.shape[0]
        if flag[0] == 'domain': n_PDE += points.shape[0]
        if flag[0] == 'data': n_data += points.shape[0]

    start_time = time.time()
    entropy_weight = entropy_weight_init
    decay_rate = entropy_weight_init / iterations  # linear anneal to zero

    # Per-iteration loss histories.
    l_history, pde_loss_history, bc_loss_history = [], [], []
    ic_loss_history, data_loss_history = [], []
    err_history = [] if test_in is not None else None

    for epoch in range(iterations):
        optimizer.zero_grad()
        l_BC = l_IC = l_PDE = l_data = 0

        for points, flag in zip(point_sets, flags):
            if flag[0] == 'BC':
                f = BC(points[:, 0:1], points[:, 1:2], points[:, 2:3], points[:, 3:4], net, flag[1])
                l_BC += loss(f) * points.shape[0] / n_bc
            if flag[0] == 'IC':
                pred = net(points)
                l_IC += loss(pred, flag[1]) * points.shape[0] / n_ic
            if flag[0] == 'data':
                pred = net(points)
                l_data += loss(pred, flag[1]) * points.shape[0] / n_data
            if flag[0] == 'domain':
                f = PDE(points[:, 0:1], points[:, 1:2], points[:, 2:3], points[:, 3:4], net)
                l_PDE += loss(f) * points.shape[0] / n_PDE

        cost = (w[0] * l_BC + w[1] * l_IC + w[2] * l_PDE + w[3] * l_data) / 4
        # This first backward pass only populates p.grad so that energy_field
        # can read gradient magnitudes; retain_graph keeps the graph alive
        # for the real backward pass below.
        cost.backward(retain_graph=True)

        entropy_loss = entropy_regularization(params) * entropy_weight
        # NOTE(review): p.grad is detached from the graph, so the energy term
        # contributes no gradient of its own -- it only shifts the loss value.
        energy_loss = energy_field(params) * energy_weight

        # Anneal the entropy weight linearly toward zero.
        entropy_weight = max(0.0, entropy_weight - decay_rate)

        total_loss = cost + entropy_loss + energy_loss

        # Discard the bookkeeping-pass gradients before the real update.
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # ROBUSTNESS: float() handles both scalar tensors and the plain int 0
        # a missing category leaves behind (.item() would fail on an int).
        l_history.append(float(total_loss))
        pde_loss_history.append(float(l_PDE))
        bc_loss_history.append(float(l_BC))
        ic_loss_history.append(float(l_IC))
        data_loss_history.append(float(l_data))

        # Hand over to Lion once the Adam warmup is done.
        if epoch >= warmup_steps:
            optimizer = lion_optimizer

        if epoch % info_num == 0:
            elapsed = time.time() - start_time
            if test_in is not None and test_out is not None:
                with torch.no_grad():  # no gradients needed for evaluation
                    pred_test = net(test_in)
                    test_loss = loss(pred_test, test_out)
                # BUGFIX: err_history was declared and returned but never filled.
                err_history.append(float(test_loss))
                print(f"It: {epoch}, Loss: {total_loss:.3e}, BC: {l_BC:.3e}, IC: {l_IC:.3e}, "
                      f"PDE: {l_PDE:.3e}, Data: {l_data:.3e}, Entropy: {entropy_loss:.3e}, "
                      f"Energy: {energy_loss:.3e}, Test Loss: {test_loss:.3e}, Time: {elapsed:.2f}")
            else:
                print(f"It: {epoch}, Loss: {total_loss:.3e}, BC: {l_BC:.3e}, IC: {l_IC:.3e}, "
                      f"PDE: {l_PDE:.3e}, Data: {l_data:.3e}, Entropy: {entropy_loss:.3e}, "
                      f"Energy: {energy_loss:.3e}, Time: {elapsed:.2f}")

        # NOTE(review): resetting every iteration makes the printed Time the
        # duration of (roughly) a single iteration -- confirm this is intended.
        start_time = time.time()

    if test_in is not None:
        return l_history, err_history, pde_loss_history, bc_loss_history, ic_loss_history, data_loss_history
    else:
        return l_history, None, pde_loss_history, bc_loss_history, ic_loss_history, data_loss_history