import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import time
import os  # 导入 os 模块用于获取 CPU 核心数

# --- PyTorch CPU performance settings ---
# Pin both of PyTorch's thread pools to the number of available CPU cores.
# The GR4J time loop is largely serial, so the gain there is limited, but
# internal tensor operations can still exploit the extra cores.
num_cpu_threads = os.cpu_count()
if num_cpu_threads is None:
    print("无法获取CPU核心数，PyTorch将使用默认线程设置。")
else:
    torch.set_num_threads(num_cpu_threads)
    # Inter-op pool: threads used to run independent operators concurrently.
    torch.set_num_interop_threads(num_cpu_threads)
    print(f"PyTorch CPU 线程数设置为: {torch.get_num_threads()} (Inter-op: {torch.get_num_interop_threads()})")

# All computation in this script stays on the CPU.
device = torch.device("cpu")
print(f"当前设备设置为: {device.type.upper()}")


# Cumulative S-curve for unit hydrograph UH1
@torch.jit.script  # JIT-compiled alongside the simulator so tensor types stay consistent
def SH1_CURVE(t: torch.Tensor, x4_val: torch.Tensor) -> torch.Tensor:
    """First cumulative S-curve (SH1) of GR4J: 0 for t <= 0, (t/x4)**2.5
    on (0, x4), and 1 once t reaches x4."""
    step = t.to(torch.float32)
    base = x4_val.to(torch.float32)

    if step <= 0.0:
        return torch.tensor(0.0, device=t.device, dtype=torch.float32)
    if step >= base:
        return torch.tensor(1.0, device=t.device, dtype=torch.float32)
    return (step / base) ** 2.5


@torch.jit.script  # JIT-compiled alongside the simulator so tensor types stay consistent
def SH2_CURVE(t: torch.Tensor, x4_val: torch.Tensor) -> torch.Tensor:
    """Second cumulative S-curve (SH2) of GR4J, spanning 0..2*x4: a rising
    half-power limb up to x4, then a mirrored limb approaching 1 at 2*x4."""
    step = t.to(torch.float32)
    base = x4_val.to(torch.float32)

    if step <= 0.0:
        return torch.tensor(0.0, device=t.device, dtype=torch.float32)
    if step <= base:
        # Rising limb (half weight).
        return 0.5 * (step / base) ** 2.5
    if step < 2 * base:
        # Falling limb, mirror image of the rising one.
        return 1 - 0.5 * (2 - step / base) ** 2.5
    return torch.tensor(1.0, device=t.device, dtype=torch.float32)


# GR4J model simulation function - accelerated with torch.jit.script
@torch.jit.script
def simulate_gr4j_torch(P_input: torch.Tensor, E_input: torch.Tensor,
                        x1: torch.Tensor, x2: torch.Tensor, x3: torch.Tensor, x4: torch.Tensor,
                        nStep: int, S0_ratio: float, R0_ratio: float) -> torch.Tensor:
    """Simulate daily streamflow with the GR4J model.

    Args (parameter names follow the table printed by the training script):
        P_input, E_input: daily precipitation / evapotranspiration series; 1-D,
            at least nStep elements — presumably mm/day, matching Qobs_mm.
        x1: production (soil moisture) store capacity.
        x2: water exchange coefficient.
        x3: routing store capacity.
        x4: unit-hydrograph time constant (days).
        nStep: number of daily steps to simulate.
        S0_ratio, R0_ratio: initial fill ratios of the S and R stores
            relative to x1 and x3 respectively.

    Returns:
        1-D tensor of simulated flow, one value per time step, differentiable
        with respect to x1..x4.
    """
    device = P_input.device

    # Net rainfall Pn and net evapotranspiration En; at most one of the two
    # is non-zero on any given day.
    Pn = torch.where(P_input >= E_input, P_input - E_input, torch.tensor(0.0, device=device, dtype=P_input.dtype))
    En = torch.where(P_input < E_input, E_input - P_input, torch.tensor(0.0, device=device, dtype=P_input.dtype))

    # Unit hydrographs are truncated at a fixed maximum delay (days).
    maxDayDelay = 10

    SH1_list = torch.jit.annotate(list[torch.Tensor], [])  # JIT requires type annotation for lists
    for i in range(maxDayDelay):
        SH1_list.append(SH1_CURVE(torch.tensor(float(i + 1), device=device), x4))

    SH2_list = torch.jit.annotate(list[torch.Tensor], [])
    for i in range(2 * maxDayDelay):
        SH2_list.append(SH2_CURVE(torch.tensor(float(i + 1), device=device), x4))

    SH1 = torch.stack(SH1_list)
    SH2 = torch.stack(SH2_list)

    # UH ordinates are the first differences of the cumulative S-curves.
    UH1 = torch.diff(torch.cat([torch.tensor([0.0], dtype=torch.float32, device=device), SH1]))
    UH2 = torch.diff(torch.cat([torch.tensor([0.0], dtype=torch.float32, device=device), SH2]))

    Q_list = torch.jit.annotate(list[torch.Tensor], [])

    # Initial store levels (S: production store, R: routing store).
    S_TEMP = S0_ratio * x1
    R_TEMP = R0_ratio * x3

    # Rolling convolution state for the fast (UH1) and slow (UH2) branches.
    UH_Fast_prev = torch.zeros(maxDayDelay, dtype=torch.float32, device=device)
    UH_Slow_prev = torch.zeros(2 * maxDayDelay, dtype=torch.float32, device=device)

    for i in range(nStep):
        current_Pn = Pn[i]
        current_En = En[i]

        # Share of net rainfall (Ps) absorbed by the production store.
        tanh_pn_over_x1 = torch.tanh(current_Pn / x1)
        term_s_over_x1_sq = (S_TEMP / x1) ** 2
        Ps_val_numerator = x1 * (1 - term_s_over_x1_sq) * tanh_pn_over_x1
        Ps_val_denominator = 1 + S_TEMP / x1 * tanh_pn_over_x1
        Ps_val_raw = Ps_val_numerator / (Ps_val_denominator + 1e-9)  # epsilon guards divide-by-zero

        # Actual evaporation (Es) drawn from the production store.
        tanh_en_over_x1 = torch.tanh(current_En / x1)
        term_s_over_x1 = S_TEMP / x1
        Es_val_numerator = S_TEMP * (2 - term_s_over_x1) * tanh_en_over_x1
        Es_val_denominator = 1 + (1 - term_s_over_x1) * tanh_en_over_x1
        Es_val_raw = Es_val_numerator / (Es_val_denominator + 1e-9)

        # torch.where keeps the graph differentiable while zeroing the inactive branch.
        Ps_val = torch.where(current_Pn > 0, Ps_val_raw, torch.tensor(0.0, device=device, dtype=P_input.dtype))
        Es_val = torch.where(current_En > 0, Es_val_raw, torch.tensor(0.0, device=device, dtype=P_input.dtype))

        # Update the production store, then release percolation Perc to routing.
        S_TEMP_new = S_TEMP - Es_val + Ps_val
        perc_term = (4.0 / 9.0 * (S_TEMP_new / x1)) ** 4
        Perc_val = S_TEMP_new * (1 - (1 + perc_term) ** (-0.25))
        Pr_val = Perc_val + (current_Pn - Ps_val)  # total water reaching the routing stage
        S_TEMP = S_TEMP_new - Perc_val

        # Groundwater exchange F, then 90/10 split between fast and slow routing.
        F = x2 * (R_TEMP / x3) ** 3.5
        R_Fast = Pr_val * 0.9
        R_Slow = Pr_val * 0.1

        # Spread today's input over future days via the unit hydrographs.
        UH_Fast_current = R_Fast * UH1
        UH_Slow_current = R_Slow * UH2

        if i > 0:
            # Shift yesterday's remaining convolution state forward one day
            # and superpose it onto today's contribution.
            UH_Fast_current = UH_Fast_current + torch.cat(
                [UH_Fast_prev[1:], torch.tensor([0.0], device=device, dtype=P_input.dtype)])
            UH_Slow_current = UH_Slow_current + torch.cat(
                [UH_Slow_prev[1:], torch.tensor([0.0], device=device, dtype=P_input.dtype)])

        # Routing store update and its outflow Qr (clamped so the store never goes negative).
        R_TEMP_new = torch.clamp(R_TEMP + UH_Fast_current[0] + F, min=0.0)
        Qr_val = R_TEMP_new * (1 - (1 + (R_TEMP_new / x3) ** 4) ** (-0.25))
        R_TEMP = R_TEMP_new - Qr_val
        # Direct-branch flow Qd: slow unit hydrograph output plus exchange.
        Qd_val = torch.clamp(UH_Slow_current[0] + F, min=0.0)
        Q_val = Qr_val + Qd_val
        Q_list.append(Q_val)

        UH_Fast_prev = UH_Fast_current
        UH_Slow_prev = UH_Slow_current

    return torch.stack(Q_list)


# Differentiable GR4J wrapper with trainable parameters
class GR4JModel_Torch(nn.Module):
    """GR4J model whose parameters x1..x4 are optimized by gradient descent.

    Args:
        nStep: number of daily time steps to simulate.
        upperTankRatio: initial fill ratio of the production store (S0 / x1).
        lowerTankRatio: initial fill ratio of the routing store (R0 / x3).
        initial_params: starting values for (x1, x2, x3, x4).
    """

    def __init__(self, nStep, upperTankRatio, lowerTankRatio, initial_params):
        super(GR4JModel_Torch, self).__init__()
        self.nStep = nStep
        self.upperTankRatio = upperTankRatio
        self.lowerTankRatio = lowerTankRatio

        # Register each GR4J parameter as a trainable float32 scalar.
        x1_init, x2_init, x3_init, x4_init = initial_params
        self.x1 = nn.Parameter(torch.tensor(x1_init, dtype=torch.float32))
        self.x2 = nn.Parameter(torch.tensor(x2_init, dtype=torch.float32))
        self.x3 = nn.Parameter(torch.tensor(x3_init, dtype=torch.float32))
        self.x4 = nn.Parameter(torch.tensor(x4_init, dtype=torch.float32))

    def apply_parameter_bounds(self):
        """Clamp every parameter in place to its calibration range."""
        limits = [
            (self.x1, 100.0, 1200.0),
            (self.x2, -5.0, 3.0),
            (self.x3, 20.0, 300.0),
            (self.x4, 0.1, 7.0),
        ]
        with torch.no_grad():
            for param, low, high in limits:
                param.clamp_(low, high)

    def forward(self, P, E):
        """Run the GR4J simulation for precipitation P and evapotranspiration E."""
        return simulate_gr4j_torch(P, E, self.x1, self.x2, self.x3, self.x4,
                                   self.nStep, self.upperTankRatio, self.lowerTankRatio)


# NSE loss (minimizes 1 - NSE)
def nse_loss_torch(y_pred, y_true):
    """Return 1 - NSE (Nash-Sutcliffe efficiency); 0 is a perfect fit."""
    residual_ss = torch.sum((y_true - y_pred) ** 2)
    variance_ss = torch.sum((y_true - torch.mean(y_true)) ** 2)

    # Constant observations make NSE undefined; return a huge CPU-side penalty.
    if variance_ss == 0:
        return torch.tensor(1.0e9, device=y_true.device, dtype=y_true.dtype)

    nse_value = 1 - residual_ss / variance_ss
    return 1 - nse_value


# Load input data: catchment constants and the daily forcing series.
print("正在加载数据...")
other_para = np.loadtxt('others.txt')
data = np.loadtxt('inputData.txt')

# others.txt layout (per the assignments below): catchment area, initial
# production-store fill ratio, initial routing-store fill ratio.
area = other_para[0]
upperTankRatio = other_para[1]
lowerTankRatio = other_para[2]

# Convert data columns to PyTorch tensors on the CPU device.
# inputData.txt columns: precipitation P, evapotranspiration E, observed flow Q (m³/s).
P_torch = torch.tensor(data[:, 0], dtype=torch.float32, device=device)
E_torch = torch.tensor(data[:, 1], dtype=torch.float32, device=device)
Qobs_torch = torch.tensor(data[:, 2], dtype=torch.float32, device=device)
Qobs_mm_torch = Qobs_torch * 86.4 / area  # m³/s -> mm/d (86.4 = 86400 s/d * 1000 mm/m / 1e6 m²/km²; area in km²)
nStep = data.shape[0]

print(f"数据已加载: {nStep} 个时间步长, 水文流域面积: {area} 平方公里")

# --- Deep-learning fine-tuning stage ---
print("\n--- 直接从指定参数开始深度学习精调 (纯CPU优化) ---")

# Best parameters / NSE previously found by a plain grid search; they seed
# the gradient-based refinement below.
initial_params_for_dl = (100.0, -3.0, 100.0, 1.1)
initial_nse_from_grid = 0.8531

print(f"深度学习将从以下参数开始: x1={initial_params_for_dl[0]}, x2={initial_params_for_dl[1]}, "
      f"x3={initial_params_for_dl[2]}, x4={initial_params_for_dl[3]} (初始NSE: {initial_nse_from_grid:.4f})")

# Build the differentiable GR4J model (on CPU).
model_dl = GR4JModel_Torch(nStep, upperTankRatio, lowerTankRatio, initial_params=initial_params_for_dl).to(device)

# Adam optimizer with a deliberately large learning rate for faster convergence.
optimizer_dl = optim.Adam(model_dl.parameters(), lr=0.05)  # learning rate raised aggressively to 0.05

# LR scheduler: small patience so the learning rate decays early on loss plateaus.
scheduler_dl = optim.lr_scheduler.ReduceLROnPlateau(optimizer_dl, 'min', patience=5, factor=0.5, verbose=True)

# Training configuration.
num_epochs_dl = 1000  # fewer total epochs; the higher LR is expected to converge faster
warmup_period = 365  # first year excluded from the loss (model spin-up)

# Seed the "best so far" records with the grid-search result.
best_nse_dl = initial_nse_from_grid
best_params_dl = initial_params_for_dl

# One forward pass with the initial parameters gives the baseline simulated flow.
with torch.no_grad():
    initial_Q_pred_dl = model_dl(P_torch, E_torch)
    best_Q_dl = initial_Q_pred_dl.cpu().numpy()  # NumPy copy (CPU) kept for plotting

print(
    "-----------------------------------------------------------------------------------------------------------------")
print(
    " Epoch | Loss (1-NSE) | NSE (当前) | x1 (集水区容量) | x2 (水量交换系数) | x3 (汇流库容量) | x4 (单位线时间常数) | 学习率 | Epoch耗时(s)")
print(
    "-----------------------------------------------------------------------------------------------------------------")

start_time_total = time.time()  # wall-clock start of the whole fine-tuning run

for epoch in range(num_epochs_dl):
    epoch_start_time = time.time()  # wall-clock start of this epoch

    optimizer_dl.zero_grad()

    # Full forward simulation over every time step.
    Q_pred_dl = model_dl(P_torch, E_torch)

    # Drop the warm-up year before computing the loss.
    Q_eval_dl = Q_pred_dl[warmup_period:]
    Qobs_eval_dl = Qobs_mm_torch[warmup_period:]

    loss_dl = nse_loss_torch(Q_eval_dl, Qobs_eval_dl)

    # Backpropagate through the whole simulation loop and update x1..x4.
    loss_dl.backward()
    optimizer_dl.step()

    # Project the parameters back into their calibration ranges.
    model_dl.apply_parameter_bounds()

    current_lr_dl = optimizer_dl.param_groups[0]['lr']

    with torch.no_grad():
        # NOTE(review): this NSE (and best_Q_dl below) reflects the parameters
        # *before* this epoch's optimizer step/clamping, while the printed
        # x1..x4 are the post-step values.
        current_nse_dl = 1 - loss_dl.item()

        if current_nse_dl > best_nse_dl:
            best_nse_dl = current_nse_dl
            best_params_dl = (model_dl.x1.item(), model_dl.x2.item(), model_dl.x3.item(), model_dl.x4.item())
            best_Q_dl = Q_pred_dl.cpu().numpy()  # NumPy copy (CPU) kept for plotting

    epoch_end_time = time.time()  # wall-clock end of this epoch
    epoch_duration = epoch_end_time - epoch_start_time

    # Log every single epoch.
    print(
        f"{epoch + 1:6d} | {loss_dl.item():<12.6f} | {current_nse_dl:<9.4f} | {model_dl.x1.item():<15.1f} | {model_dl.x2.item():<19.2f} | {model_dl.x3.item():<15.1f} | {model_dl.x4.item():<20.2f} | {current_lr_dl:.6f} | {epoch_duration:.2f}")

    scheduler_dl.step(loss_dl)

total_duration = time.time() - start_time_total
print(
    "-----------------------------------------------------------------------------------------------------------------")
print("\n深度学习精调完成！")
print(f"总运行时间: {total_duration:.2f} 秒 ({total_duration / 60:.2f} 分钟)")
print(
    f"最终最优参数组合: x1={best_params_dl[0]:.1f}, x2={best_params_dl[1]:.2f}, x3={best_params_dl[2]:.1f}, x4={best_params_dl[3]:.2f}")
print(f"最终最优NSE值: {best_nse_dl:.4f}")

# Plot observed vs. simulated discharge.
plt.figure(figsize=(12, 8))

# Plot 1: full time series
plt.subplot(2, 1, 1)
# Move tensors to CPU NumPy arrays before plotting.
plt.plot(range(nStep), Qobs_mm_torch.cpu().numpy(), label="观测径流 (Observed Flow)", color="black", linewidth=1)
plt.plot(range(nStep), best_Q_dl, label="深度学习模拟径流 (Deep Learning Simulated Flow)", color="red", linestyle='--',
         linewidth=1)
plt.title(f"GR4J 深度学习精调结果 (GR4J Deep Learning Refinement Results) (NSE={best_nse_dl:.4f})")
plt.xlabel("时间 (天)")
plt.ylabel("流量 (mm/d)")
plt.legend()
plt.grid(True, alpha=0.3)

# Plot 2: Zoomed view (last 1000 days)
plt.subplot(2, 1, 2)
zoom_start = max(0, nStep - 1000)
# Move tensors to CPU NumPy arrays before plotting.
plt.plot(range(zoom_start, nStep), Qobs_mm_torch[zoom_start:].cpu().numpy(), label="观测径流 (Observed Flow)",
         color="black", linewidth=1)
plt.plot(range(zoom_start, nStep), best_Q_dl[zoom_start:], label="深度学习模拟径流 (Deep Learning Simulated Flow)",
         color="red", linestyle='--', linewidth=1)
plt.title("详细视图 (最后1000天) (Detailed View (Last 1000 days))")
plt.xlabel("时间 (天)")
plt.ylabel("流量 (mm/d)")
plt.legend()
plt.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

# Print the grid-search vs. fine-tuned parameter comparison table.
print("\n参数对比 (Parameter Comparison):")
print("----------------------------------------------------------")
print(" 方法        | x1      | x2     | x3      | x4     | NSE    ")
print("----------------------------------------------------------")
print(
    f" 初始普通遍历 | {initial_params_for_dl[0]:7.1f} | {initial_params_for_dl[1]:6.2f} | {initial_params_for_dl[2]:7.1f} | {initial_params_for_dl[3]:6.2f} | {initial_nse_from_grid:.4f} ")
print(
    f" 深度学习精调 | {best_params_dl[0]:7.1f} | {best_params_dl[1]:6.2f} | {best_params_dl[2]:7.1f} | {best_params_dl[3]:6.2f} | {best_nse_dl:.4f} ")
print("----------------------------------------------------------")