import numpy as np
import torch
import argparse
import sys 
import logging
import contextlib
from torch.utils.data import TensorDataset
from cybertorch.units import Length
from cybertorch.embedding import MolEmbedding
from cybertorch.model import MolCT
from cybertorch.readout import GFNReadout, AtomwiseReadout
from cybertorch.cybertron import Cybertron
from cybertorch.utils import set_cybertorch_global_device,set_seed
from cybertorch.train import MolWithLossCell, MolWithEvalCell, MSELoss
from cybertorch.train.new_metric import RMSE
from cybertorch.train.model import Model
from cybertorch.dataset import MolecularDataset

# Device selection: prefer CUDA when available, and mirror the choice into
# cybertorch's own global-device setting so the library allocates there too.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
set_cybertorch_global_device(device)

# Configure the root logger so the logging.info() calls in this script are
# actually emitted — the default root level is WARNING, which silently
# suppressed them before.
logging.basicConfig(level=logging.INFO)

# Reproducibility: seed the RNGs. set_seed takes three seeds — presumably one
# per RNG backend (python/numpy/torch); confirm against cybertorch.utils.
seed = 5
set_seed(seed, seed, seed)
logging.info(f"Using device: {device}")
logging.info(f"Random seed: {seed}")

# Load the packaged dataset: F = forces (labels), Z = atomic numbers,
# R = Cartesian coordinates. NOTE(review): cluster-specific absolute path.
data = np.load('/lustre/grp/gyqlab/fanc/pimm/dataset_8_2918.npz')
force = data['F']
atoms = data['Z']
coords = data['R']
num_atoms = atoms.shape[1]  # atoms per molecule — assumes fixed-size molecules; TODO confirm
# Normalize force labels by their standard deviation; the same `scale` is
# handed to Cybertron below so predictions are rescaled back at inference.
scale = float(np.sqrt(np.var(force)))
# Move everything to the target device up front; labels are the scaled forces.
dataset = TensorDataset(torch.tensor(coords,dtype=torch.float32).to(device),
                     torch.tensor(atoms,dtype=torch.int32).to(device),
                     torch.tensor(force/scale,dtype=torch.float32).to(device))

# Wrap in the project's dataset class, which presumably handles batching
# and train/eval splitting — confirm against cybertorch.dataset.
dataset = MolecularDataset(dataset,batch_size=32)

# Shared feature width for the embedding, backbone, and readout below.
dim_feature = 128

# Node embedding built from interatomic distances only (no bond features):
# Gaussian radial basis expansion with a smooth cutoff at 0.6 nm.
emb_settings = {
    'dim_node': dim_feature,
    'emb_dis': True,         # embed pairwise distances
    'emb_bond': False,       # no bond/edge-type embedding
    'cutoff': Length(0.6, 'nm'),
    'cutoff_fn': 'smooth',
    'rbf_fn': 'gaussian',
    'num_basis': dim_feature,
    'activation': 'silu',
    'length_unit': 'nm',
}
emb = MolEmbedding(**emb_settings)

# Molecular Configuration Transformer backbone: three attention-based
# interaction layers with eight heads each. coupled_interaction=False
# presumably gives each layer its own parameters — confirm in MolCT docs.
mct_settings = {
    'dim_feature': dim_feature,
    'dim_edge_emb': emb.dim_edge,  # edge width dictated by the embedding above
    'n_interaction': 3,
    'n_heads': 8,
    'activation': 'silu',
    'coupled_interaction': False,
}
mod = MolCT(**mct_settings)

# Readout head over node and edge representations. shared_parms=True
# presumably reuses the same weights across the 3 iterations — confirm
# against cybertorch.readout.
gfn_settings = {
    'dim_node_rep': dim_feature,
    'dim_edge_rep': dim_feature,
    'node_activation': 'silu',
    'edge_activation': 'silu',
    'iterations': 3,
    'shared_parms': True,
}
readout = GFNReadout(**gfn_settings)

# Assemble the full model: embedding -> interaction backbone -> readout.
# `scale` lets the network un-normalize predictions back to force units.
net = Cybertron(embedding=emb,
                model=mod,
                readout=readout,
                num_atoms=num_atoms,
                scale=scale)

net.print_info()
# net.compile()
# Count trainable parameters with a generator expression (the original built
# a throwaway list inside sum()).
total_parm = sum(p.numel() for p in net.parameters() if p.requires_grad)
logging.info(f"Total parameters: {total_parm}")
# Allow TF32 matmuls on supporting GPUs for faster float32 training.
torch.set_float32_matmul_precision('high')

# Training cell (net + summed MSE loss) and evaluation cell; both consume the
# (coordinate, atom_type, label) columns of the dataset. normed_evaldata=True
# presumably means eval labels are already scaled — confirm in cybertorch.train.
mwlc = MolWithLossCell(['coordinate','atom_type','label'], net, MSELoss(reduction='sum'))
mwec = MolWithEvalCell(['coordinate','atom_type','label'], net, MSELoss(reduction='sum'), normed_evaldata=True)

# Evaluation metrics, keyed by the name referenced as main_metric below.
metrics = {
    "rmse": RMSE(
        index=0,           # use the first network output
        per_atom=True,     # normalize by the number of atoms
        reduction='mean',  # reduce with the mean
        aggregate='mean'   # aggregate with the mean (original comment said "sum", but the value is 'mean')
    )
}

# 初始化模型、优化器和学习率调度器
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
init_lr = 1e-3
final_lr = 0.2e-3  
decay_rate = 0.998

def lr_lambda(epoch, *, init_lr=1e-3, final_lr=0.2e-3, decay_rate=0.998):
    """Multiplicative LR factor for LambdaLR: exponential decay with a floor.

    Returns ``decay_rate ** epoch`` until that falls below the
    ``final_lr / init_lr`` ratio, then holds at the ratio. Equivalent to the
    original log-threshold formulation, but the floor is expressed directly
    with ``max`` instead of recomputing ``log(final/init)/log(decay)`` on
    every call. The schedule constants are keyword-only parameters (defaults
    mirror the module-level init_lr/final_lr/decay_rate) so the function is
    self-contained and reusable; LambdaLR still calls it as ``lr_lambda(epoch)``.
    """
    return max(decay_rate ** epoch, final_lr / init_lr)
# Apply the decay schedule as a multiplicative factor on the optimizer's lr.
lr = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda)

# Project training driver: wires dataset, train/eval cells, optimizer,
# scheduler, and metrics together. main_metric presumably selects which
# metric drives checkpointing — confirm in cybertorch.train.model.Model.
model = Model(
    dataset=dataset,
    train_network=mwlc,
    eval_network=mwec,
    optimizer=optimizer,
    scheduler=lr,
    metrics=metrics,
    main_metric="rmse",
    save_dir='/lustre/grp/gyqlab/fanc/pimm/ckpts',  # NOTE(review): cluster-specific path
)
model.train(num_epochs=1024)