File size: 718 Bytes
d4cef17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
import torch
from typing import Tuple
from transformers import AutoTokenizer, AutoModelForCausalLM

# Module-level cache for the lazily loaded Hugging Face objects.  Both stay
# None until get_model_and_tokenizer() is first called, then hold the shared
# singletons for the rest of the process lifetime.
tokenizer = None
model = None


def get_model_and_tokenizer() -> Tuple[AutoModelForCausalLM, AutoTokenizer]:
    """Return the shared (model, tokenizer) pair, loading them on first call.

    The pretrained checkpoint and tokenizer are fetched from the Hugging Face
    hub exactly once and memoised in the module-level globals; every later
    call hands back the same cached objects.

    Returns:
        Tuple[AutoModelForCausalLM, AutoTokenizer]: the causal-LM model
        (moved to GPU when CUDA is available, otherwise CPU) and its
        matching tokenizer.
    """
    global model, tokenizer

    if tokenizer is None or model is None:
        tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer")

        loaded = AutoModelForCausalLM.from_pretrained(
            "juancopi81/lmd-8bars-2048-epochs40_v4"
        )
        # Prefer the GPU when one is present; fall back to CPU otherwise.
        target = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = loaded.to(target)

    return model, tokenizer