import numpy as np
import sys

sys.path.append('/kaggle/working/deep_over_sampling')
import torch
from torch.utils.data import DataLoader
import os
from dos.dataset import get_splits_v1, get_imbalanced_weights, get_splits_v2
from dos.baseline import Model
from experiments import Handler, set_seed

# --- Reproducibility & experiment hyper-parameters -------------------------
seed = 1
set_seed(seed)
rolling_window_size = 5

# Binary classification: targets are {0, 1}.
unique_targets = np.arange(2)
# Per-class resampling weights for the imbalanced (fraud) dataset;
# minority_count=1 marks one class as the minority.
resample_dict = get_imbalanced_weights(unique_targets, minority_count=1, min_weight=0.1, max_weight=0.5)
# k nearest in-class neighbors used by the deep-over-sampling layer, per class.
k_dict = {target: 5 for target in resample_dict}

print('resample_dict = {}\n{}\n k_dict = {}\n{}\n'.format(resample_dict, '=' * 150, k_dict, '=' * 150))

# --- Data -------------------------------------------------------------------
#splits, size = get_splits_v1(path='/kaggle/input/fraud-detection-dataset/transactions/transactions.txt', resample_dict=resample_dict, rolling_window_size=rolling_window_size, seed=seed)
splits, size = get_splits_v2(path='/kaggle/input/creditcardfraud/creditcard.csv',
                             resample_dict=resample_dict, rolling_window_size=rolling_window_size, seed=seed)

# Shuffle only the training loader; evaluation order must be deterministic
# so validation/test metrics are reproducible run to run.
train_loader = DataLoader(splits['train'], batch_size=128, shuffle=True, num_workers=2)
valid_loader = DataLoader(splits['valid'], batch_size=128, shuffle=False, num_workers=2)
test_loader = DataLoader(splits['test'], batch_size=128, shuffle=False, num_workers=2)

# --- Model & optimization -----------------------------------------------------
model = Model(k_dict=k_dict, input_size=size, num_classes=2)

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# alpha weights the deep-over-sampling auxiliary loss against the CE loss.
handler = Handler(model=model, optimizer=optimizer, num_classes=2, alpha=1.0)

# --- Train / evaluate / report ----------------------------------------------
handler.train(train_dataset=train_loader, valid_dataset=valid_loader, num_epochs=10, grad_accum_step=1)
handler.evaluate(test_loader, name='test')

handler.summary(train_loader, csv_path='./train_summary.csv')
handler.summary(test_loader, csv_path='./test_summary.csv')

handler.save_checkpoint(path='./ckpt.pth')