import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO
import base64
import sympy as sp
from sympy.parsing.sympy_parser import parse_expr


class PINN(nn.Module):
    """支持二阶导数的 PINN 模型"""

    def __init__(self, input_dim=1, hidden_dim=64, output_dim=1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, output_dim)
        )

    def forward(self, t):
        return self.net(t)


def solve_dae_system(equations_str, t_max=5.0, epochs=5000, lr=0.001):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 解析方程和变量
    equations = [eq.strip() for eq in equations_str.split('\n') if eq.strip()]
    variables, highest_orders = detect_variables_and_orders(equations)
    output_dim = len(variables)

    # 初始化模型
    model = PINN(output_dim=output_dim).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=500)

    # 训练数据
    t_train = torch.linspace(0, t_max, 100, requires_grad=True).view(-1, 1).to(device)

    # 训练循环
    for epoch in range(epochs):
        optimizer.zero_grad()

        # 前向传播
        u_pred = model(t_train)

        # 计算一阶导数
        du_dt = torch.autograd.grad(
            outputs=u_pred, inputs=t_train,
            grad_outputs=torch.ones_like(u_pred),
            create_graph=True
        )[0]

        # 计算二阶导数（如果需要）
        d2u_dt2 = None
        if any(order >= 2 for order in highest_orders.values()):
            d2u_dt2 = torch.autograd.grad(
                outputs=du_dt, inputs=t_train,
                grad_outputs=torch.ones_like(du_dt),
                create_graph=True
            )[0]

        # 计算残差
        loss = 0
        for eq in equations:
            residual = compute_residual(
                eq, t_train, u_pred, du_dt, d2u_dt2,
                variables, highest_orders
            )
            loss += torch.mean(residual ** 2)

        # 初始条件（示例：u(0)=1, u'(0)=0）
        if epoch < epochs // 2:
            loss += torch.mean((u_pred[0] - 1.0) ** 2)  # u(0)=1
            if highest_orders.get('u', 0) >= 1:
                loss += torch.mean((du_dt[0]) ** 2)  # u'(0)=0

        loss.backward()
        optimizer.step()
        scheduler.step(loss)

        if epoch % 500 == 0:
            print(f"Epoch {epoch}, Loss: {loss.item():.4f}, LR: {optimizer.param_groups[0]['lr']:.6f}")

    # 生成结果
    plot = generate_plot(model, t_max, variables, device)
    table_data = generate_table_data(model, t_max, variables, device)

    return {
        'plot': plot,
        'table_data': table_data,
        'variables': variables
    }


def detect_variables_and_orders(equations):
    """检测变量及其最高阶导数"""
    variables = set()
    orders = {}

    for eq in equations:
        # 替换导数符号（u'' -> u_d2）
        eq_processed = eq.replace("''", "_d2").replace("'", "_d1")
        expr = parse_expr(eq_processed)

        # 提取变量和阶数
        for symbol in expr.free_symbols:
            s = str(symbol)
            if s.startswith(('u_', 'v_', 'w_')):
                base_var = s.split('_')[0]
                variables.add(base_var)
                order = int(s.split('_')[1][1:])  # 提取 d1/d2 的数字部分
                orders[base_var] = max(orders.get(base_var, 0), order)
            elif s in ['u', 'v', 'w']:
                variables.add(s)
                orders[s] = max(orders.get(s, 0), 0)

    return sorted(variables), orders


def compute_residual(eq, t, u, du_dt, d2u_dt2, variables, orders):
    """计算方程残差（支持二阶导数）"""
    # 预处理方程
    eq_processed = eq.replace("''", "_d2").replace("'", "_d1")
    expr = parse_expr(eq_processed)

    # 替换符号为实际值
    residual = expr.subs('t', t)
    for i, var in enumerate(variables):
        residual = residual.subs(var, u[:, i])
        if orders.get(var, 0) >= 1:
            residual = residual.subs(f"{var}_d1", du_dt[:, i])
        if orders.get(var, 0) >= 2 and d2u_dt2 is not None:
            residual = residual.subs(f"{var}_d2", d2u_dt2[:, i])

    return residual

def generate_plot(model, t_max, variables, device, n_points=100):
    """生成解的 SVG 图像"""
    t_test = torch.linspace(0, t_max, n_points).view(-1, 1).to(device)
    with torch.no_grad():
        u_pred = model(t_test).cpu().numpy()

    plt.figure(figsize=(10, 6))
    for i, var in enumerate(variables):
        plt.plot(t_test.cpu().numpy(), u_pred[:, i], label=f"{var}(t)")

    plt.xlabel('t')
    plt.legend()

    # 转换为 SVG
    buffer = BytesIO()
    plt.savefig(buffer, format='svg')
    plt.close()
    return base64.b64encode(buffer.getvalue()).decode('utf-8')


def generate_table_data(model, t_max, variables, device, n_points=20):
    """生成表格数据"""
    t_test = torch.linspace(0, t_max, n_points).view(-1, 1).to(device)
    with torch.no_grad():
        u_pred = model(t_test).cpu().numpy()

    return [
        {'t': float(t), **{var: float(u_pred[i][j]) for j, var in enumerate(variables)}}
        for i, t in enumerate(t_test.cpu().numpy().flatten())
    ]