#%%
import os
import time

import modal
from dotenv import find_dotenv, load_dotenv
from llama_index.finetuning import EmbeddingQAFinetuneDataset

# Load Modal credentials and other settings from the local env file.
load_dotenv(find_dotenv('env'), override=True)

#%%
training_path = 'data/training_data_300.json'
valid_path = 'data/validation_data_100.json'

# Load the datasets locally as a sanity check; only the JSON paths are
# passed to the remote job below.
training_set = EmbeddingQAFinetuneDataset.from_json(training_path)
valid_set = EmbeddingQAFinetuneDataset.from_json(valid_path)
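# For reference, an EmbeddingQAFinetuneDataset JSON file holds three mappings:
# `queries` (query id -> question), `corpus` (node id -> text chunk), and
# `relevant_docs` (query id -> ids of the chunks that answer the query).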

def finetune(model='all-mpnet-base-v2', savemodel=False, outpath='.'):
    """Fine-tunes an embedding model on a Modal A100 GPU.

    The remote job stores the model in /root/models on a Modal volume
    and returns the serialized weights, which can be saved locally.

    Args:
        model (str): the Sentence Transformers model name.
        savemodel (bool, optional): whether to save the model locally.
        outpath (str, optional): directory for the locally saved model.

    Returns:
        str: path of the saved model (when saved), otherwise the
        in-memory buffer returned by the remote job.
    """
    f = modal.Function.lookup("vector-search-project", "finetune")

    # Normalize to the fully qualified Hugging Face model id...
    if 'sentence-transformers' not in model:
        model = f"sentence-transformers/{model}"

    # ...but use only the bare model name in the local file name: a slash
    # in the path would point into a nonexistent subdirectory.
    model_name = model.split('/')[-1]
    fullpath = os.path.join(outpath, f"finetuned-{model_name}-300")

    if os.path.exists(fullpath):
        msg = "Model already exists!"
        print(msg)
        return msg

    start = time.perf_counter()
    finetuned_model = f.remote(training_path, valid_path, model_id=model)
    end = time.perf_counter() - start
    print(f"Finetuning with GPU lasted {end:.2f} seconds")

    if savemodel:
        # The remote function returns the model as an in-memory buffer;
        # write its contents to a local file.
        with open(fullpath, 'wb') as file:
            file.write(finetuned_model.getbuffer())
        print(f"Model saved in {fullpath}")
        return fullpath

    return finetuned_model
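
#%%
# Example invocation -- a sketch, assuming the "vector-search-project" Modal
# app is deployed and its remote `finetune` accepts (training_path,
# valid_path, model_id) as the call above does. With savemodel=True the
# weights are written to ./finetuned-all-mpnet-base-v2-300.
saved_path = finetune('all-mpnet-base-v2', savemodel=True)

#%%
# For context, a hypothetical sketch of the remote function this script looks
# up. The real implementation lives in the deployed Modal app; the engine and
# serialization details below are assumptions, not the confirmed code.
#
# import modal
# stub = modal.Stub("vector-search-project")
#
# @stub.function(gpu="A100")
# def finetune(training_path, valid_path, model_id):
#     from llama_index.finetuning import (EmbeddingQAFinetuneDataset,
#                                         SentenceTransformersFinetuneEngine)
#     train = EmbeddingQAFinetuneDataset.from_json(training_path)
#     engine = SentenceTransformersFinetuneEngine(
#         train, model_id=model_id, model_output_path="/root/models")
#     engine.finetune()
#     ...  # serialize /root/models into an io.BytesIO and return it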