import numpy as np
import glob
import torch
from torch.utils.data import TensorDataset, DataLoader
from cybertorch.units import Length
from cybertorch.embedding import MolEmbedding
from cybertorch.model import MolCT
from cybertorch.readout.gfn import GFNReadout
from cybertorch.readout import GraphReadout
from cybertorch.cybertron import Cybertron
from cybertorch.utils import set_cybertorch_global_device
from cybertorch.train import MolWithLossCell, MolWithEvalCell, CrossEntropyLoss
from cybertorch.train.new_metric import BinaryAccuracy
from cybertorch.train.model import Model
from cybertorch.dataset import MolecularDataset

# Run everything on the GPU; cybertorch tracks the active device globally.
device = "cuda"
set_cybertorch_global_device(device=device)

# Shared feature width for the node embedding, radial basis, and model.
dim_feature = 128

# Distance-based molecular embedding (bond embedding disabled): smooth
# cutoff at 0.6 nm with a Gaussian radial basis of dim_feature functions.
emb = MolEmbedding(
    dim_node=dim_feature,
    num_basis=dim_feature,
    emb_dis=True,
    emb_bond=False,
    cutoff=Length(0.6, 'nm'),
    cutoff_fn='smooth',
    rbf_fn='gaussian',
    activation='silu',
    length_unit='nm',
    use_sub_cutoff=False,
)

# MolCT backbone: three independent (non-coupled) multi-head attention
# interaction layers operating on the embedding's edge representation.
attention_cfg = dict(
    n_interaction=3,
    n_heads=8,
    coupled_interaction=False,
)
mod = MolCT(
    dim_feature=dim_feature,
    dim_edge_emb=emb.dim_edge,
    activation='silu',
    **attention_cfg,
)

# Graph-level readout producing a single scalar per molecule (the
# discriminator logit). Width matches the backbone's feature dimension.
readout = GraphReadout(1, dim_feature, activation='silu')

# Assemble the full network and restore weights from the best-RMSE checkpoint.
net = Cybertron(embedding=emb, model=mod, readout=readout, num_atoms=43)

# map_location keeps the load working regardless of which device the
# checkpoint was saved on. torch.load is pickle-based: only load trusted files.
ckpt = torch.load('/lustre/grp/gyqlab/fanc/pimm/best_rmse_model.pt', map_location=device)
# NOTE(review): strict=False silently ignores missing/unexpected keys —
# confirm the readout swap is the only intended mismatch with the checkpoint.
net.load_state_dict(ckpt['model_state_dict'], strict=False)
torch.set_float32_matmul_precision('high')
net.set_scaleshift(1, 0)
net.compile()

# Input keys the cells feed to the network, in the order it expects them.
feed_keys = ('coordinate', 'atom_type', 'label')
mwlc = MolWithLossCell(list(feed_keys), net, CrossEntropyLoss(reduction='mean'))
mwec = MolWithEvalCell(list(feed_keys), net, CrossEntropyLoss(reduction='mean'), normed_evaldata=False)

# Load the pre-built discriminator dataset: R = coordinates, Z = atom types,
# L = labels (presumably binary — TODO confirm against the dataset builder).
# np.load on an .npz returns an NpzFile holding an open file handle, so use
# a context manager to close it once the tensors are materialized.
dataset_file = 'discriminator_dataset.npz'
with np.load(dataset_file) as t:
    data = TensorDataset(
        torch.tensor(t['R'], dtype=torch.float32).to(device),
        torch.tensor(t['Z'], dtype=torch.int32).to(device),
        torch.tensor(t['L'], dtype=torch.float32).to(device),
    )
dataset = MolecularDataset(data)

# Initialize the optimizer and the exponential-decay LR schedule constants.
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)
init_lr = 1e-3      # starting LR (matches Adam's lr above)
final_lr = 0.2e-3   # floor the schedule decays toward
decay_rate = 0.998  # per-epoch multiplicative decay factor

def lr_lambda(epoch, init_lr=1e-3, final_lr=0.2e-3, decay_rate=0.998):
    """Return the multiplicative LR factor for *epoch*.

    The factor is ``decay_rate ** epoch`` until it decays down to
    ``final_lr / init_lr``, then stays clamped at that ratio. The defaults
    mirror the module-level constants, making the function self-contained;
    LambdaLR calls it with the epoch only, so the interface is unchanged.
    """
    # max() replaces the original log-threshold branch: decay_rate**epoch is
    # monotonically decreasing (0 < decay_rate < 1), so clamping from below
    # is exactly equivalent and avoids the log computation.
    return max(decay_rate ** epoch, final_lr / init_lr)
# Scheduler that applies lr_lambda as a per-epoch multiplicative LR factor.
lr = torch.optim.lr_scheduler.LambdaLR(optimizer,lr_lambda)

# Evaluation metrics reported during training; semantics of the keyword
# arguments are defined by cybertorch's BinaryAccuracy.
metrics = {
    "accuracy": BinaryAccuracy(
        index=0,           # score the first network output
        per_atom=False,     # no per-atom normalization
        reduction='mean',  # presumably mean reduction over the batch — verify
        aggregate='mean'    # presumably mean aggregation across batches — verify
    )
}

# Wire dataset, train/eval cells, optimizer, scheduler, and metrics into the
# trainer; checkpoints are selected and saved by the "accuracy" metric.
trainer_kwargs = dict(
    dataset=dataset,
    train_network=mwlc,
    eval_network=mwec,
    optimizer=optimizer,
    scheduler=lr,
    metrics=metrics,
    main_metric="accuracy",
    save_dir='/lustre/grp/gyqlab/fanc/pimm/ckpts',
    prefix="discriminator",
)
model = Model(**trainer_kwargs)
model.train(num_epochs=32)