from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned CodeGen checkpoint for natural-language -> SQL generation.
# NOTE: this downloads/loads model weights at import time (network + disk I/O).
tokenizer = AutoTokenizer.from_pretrained("ansumanpandey/codgen-finetuned-SQLQueryGeneration")
model = AutoModelForCausalLM.from_pretrained("ansumanpandey/codgen-finetuned-SQLQueryGeneration")
def get_sql(query):
    """Generate a SQL query from a natural-language description.

    Args:
        query: Natural-language description of the desired SQL statement
            (e.g. "find all employees older than 30").

    Returns:
        The model-generated SQL string, with special tokens stripped.
    """
    # The "</s>" separator matches the prompt format the model was fine-tuned on;
    # it is part of the runtime prompt, not markup.
    input_text = f"Query to {query} </s>"
    features = tokenizer([input_text], return_tensors='pt')
    output = model.generate(input_ids=features['input_ids'],
                            attention_mask=features['attention_mask'],
                            max_new_tokens=70)
    # skip_special_tokens=True so the returned SQL does not contain </s>/<pad>
    # tokens that the raw decode would otherwise include.
    sql_query = tokenizer.decode(output[0], skip_special_tokens=True)
    return sql_query