# Loading the model and libraries

from datasets import load_dataset
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import DataCollatorForLanguageModeling

# "text-generation" is the pipeline task for causal LMs such as sqlcoder
pipe = pipeline("text-generation", model="defog/sqlcoder-34b-alpha")

tokenizer = AutoTokenizer.from_pretrained("defog/sqlcoder-34b-alpha")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # LLaMA-family tokenizers ship without a pad token
model = AutoModelForCausalLM.from_pretrained("defog/sqlcoder-34b-alpha")

# a local JSON file is loaded through the "json" builder; the single file is
# then split so a validation set exists for the code below
raw_dataset = load_dataset("json", data_files="sql_train_dataset.json")
raw_dataset = raw_dataset["train"].train_test_split(test_size=0.1)
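
# Quick smoke test of the pipeline above; the prompt is a made-up example,
# not part of the original script.
demo = pipe("Question: how many users signed up in 2023?\nSQL:", max_new_tokens=64)
print(demo[0]["generated_text"])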

#%% section 1 (preparing the dataset for fine-tuning)

def tokenize_func(batch):
    # for causal-LM fine-tuning, prompt and target are concatenated into a
    # single sequence rather than encoded as a sentence pair
    texts = [q + "\n" + a for q, a in zip(batch["question"], batch["answer"])]
    return tokenizer(texts, truncation=True)

tokenized_dataset = raw_dataset.map(
    tokenize_func, batched=True, remove_columns=["question", "answer"]
)

# with mlm=False the collator pads each batch and copies input_ids into
# labels, which is what causal-LM training expects
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=False, return_tensors="tf"
)

tf_train_dataset = tokenized_dataset["train"].to_tf_dataset(
    # LLaMA-family tokenizers do not emit token_type_ids, so only input_ids
    # and attention_mask are forwarded; "labels" is created by the collator
    columns=["attention_mask", "input_ids"],
    label_cols=["labels"],
    shuffle=True,
    collate_fn=data_collator,
    batch_size=8,
)
# train_test_split names the held-out split "test"
tf_validation_dataset = tokenized_dataset["test"].to_tf_dataset(
    columns=["attention_mask", "input_ids"],
    label_cols=["labels"],
    shuffle=False,
    collate_fn=data_collator,
    batch_size=8,
)