##################################################
from model import make_model, EncoderDecoder
from spacy import load as spacy_load
from torchtext.vocab import Vocab
import parameters as p
import torchtext
import spacy
import torch


def train(model: EncoderDecoder, optimizer, vocab: Vocab, dataloader):
    """Run a training pass of an ``EncoderDecoder`` over *dataloader*.

    Parameters
    ----------
    model : EncoderDecoder
        NOTE(review): this argument is immediately discarded — a fresh model
        is built from the constants in the ``parameters`` module. Confirm
        whether the caller's model was meant to be trained instead.
    optimizer : torch.optim.Optimizer
        Currently unused — TODO: wire into the loop once the loss is computed.
    vocab : Vocab
        Currently unused — presumably needed for padding/special tokens;
        verify against the caller.
    dataloader : iterable
        Yields ``(src, tgt)`` batch tensor pairs.

    Returns
    -------
    EncoderDecoder
        The freshly built model, moved to the selected device.
    """
    # Prefer GPU when one is available.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Original behavior preserved: a new model is constructed from the
    # project-wide hyper-parameters, replacing the one passed in.
    model = make_model(src_vocab=p.SOURCE_VOCAB_SIZE,
                       tgt_vocab=p.TARGET_VOCAB_SIZE,
                       encoder_layers=p.ENCODER_LAYERS,
                       decoder_layers=p.DECODER_LAYERS,
                       d_model=p.D_MODEL,
                       d_ff=p.D_FF,
                       h=p.HEADS).to(device=device)
    model.train()  # enable training-mode layers (dropout etc.)

    for src, tgt in dataloader:
        # The original loop body was empty, which is a SyntaxError and left
        # the file unimportable. Move each batch onto the model's device;
        # the forward pass, loss, backward pass and optimizer step still
        # need to be implemented.
        src, tgt = src.to(device), tgt.to(device)
        # TODO: out = model(src, tgt); loss = criterion(out, tgt);
        #       loss.backward(); optimizer.step(); optimizer.zero_grad()

    return model
# NOTE(review): the original line called ``torchtext.data.get_tokenizer()``
# with no arguments, which raises TypeError at import time — the tokenizer
# name is a required positional argument (and in recent torchtext releases
# the helper lives in ``torchtext.data.utils``). Bind a concrete tokenizer
# instead of discarding the result; confirm "basic_english" is the intended
# choice for this project.
tokenizer = torchtext.data.get_tokenizer("basic_english")
