from model import wide_and_deep
from prework import device
from prework.dataset import data_deal
from prework.metrics import accuracy, precision, recall, f1
from prework.callback import save_log_plus
from model.loss import Loss
import torch
from torch_lib import fit, evaluate

# Split the data: 70% train / 10% val / remaining 20% test, one sample per batch.
train_dataset, val_dataset, test_dataset = data_deal(batch_size=1, train_ratio=0.7, val_ratio=0.1)

# Build the model and move it to the configured device.
model = wide_and_deep.WideAndDeep_plus().to(device=device)

# Report the number of trainable parameters for a quick sanity check.
trainable_counts = (p.numel() for p in model.parameters() if p.requires_grad)
total_trainable_params = sum(trainable_counts)
print(f'{total_trainable_params:,} training parameters.')

optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

loss = Loss()


def call(data):
    """Debug callback: print gradient info for the fuse layer's bias.

    Args:
        data: callback payload dict; expects ``data['model']`` to be a
            ``torch.nn.Module`` (the model being trained).

    Prints, for the parameter named ``fuse_layer.bias``, its name, its
    ``requires_grad`` flag, and its current gradient tensor (``None`` if no
    backward pass has run yet).
    """
    net = data['model']
    for name, param in net.named_parameters():
        if name == 'fuse_layer.bias':
            # Bug fix: the original printed the parameter tensor itself under
            # the (misspelled) 'grad_requirs' label; the intent was clearly to
            # show the requires_grad flag.
            print('-->name:', name, '-->requires_grad:', param.requires_grad, ' -->grad_value:', param.grad)


# Metrics reported during training, validation, and final testing.
metric_fns = [loss, accuracy, precision, recall, f1]


def _poly_decay(epoch):
    # Polynomial learning-rate decay schedule: (1 - epoch/1000) ** 0.9.
    return (1 - epoch / 1000) ** 0.9


# Train for 100 epochs with per-epoch logging and LR decay.
fit(
    model=model,
    train_dataset=train_dataset,
    val_dataset=val_dataset,
    epochs=100,
    metrics=metric_fns,
    epoch_callbacks=[save_log_plus],
    optimizer=optimizer,
    lr_decay='lambda',
    lr_decay_options={'lr_lambda': _poly_decay},
)

# Final evaluation on the held-out test split.
eva = evaluate(model=model, dataset=test_dataset, metrics=metric_fns)
print(eva)
