
A BART-large-CNN model fine-tuned to convert natural-language questions into SQL queries. The model was trained on the Spider dataset: https://yale-lily.github.io/spider

The model was trained on Google Colab.

Hyperparameters:

- num epochs: 3
- learning rate: 1e-5
- batch size: 8
- weight decay: 0.01
- max input length: 256
- max target length: 256
- model name: facebook/bart-large-cnn
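
For reference, here is a minimal sketch of how these hyperparameters could map onto a Hugging Face `Seq2SeqTrainingArguments` setup. The original training script was not published with this card, so the preprocessing field names (`input_text`, `sql`) and the output directory are assumptions, and Spider dataset loading is omitted:

```python
# Hypothetical reconstruction of the training setup; not the author's
# published script. Dataset loading/preprocessing for Spider is omitted.
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    Seq2SeqTrainingArguments,
    Seq2SeqTrainer,
)

model_name = "facebook/bart-large-cnn"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

MAX_INPUT_LENGTH = 256   # "max input length" from the card
MAX_TARGET_LENGTH = 256  # "max target length" from the card


def preprocess(example):
    # Assumed serialization, matching the inference code below:
    # "question: <question> table: <col1>,<col2>,..."
    model_inputs = tokenizer(
        example["input_text"], max_length=MAX_INPUT_LENGTH, truncation=True
    )
    labels = tokenizer(
        text_target=example["sql"], max_length=MAX_TARGET_LENGTH, truncation=True
    )
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs


args = Seq2SeqTrainingArguments(
    output_dir="bart-spider-sql",      # assumed output directory
    num_train_epochs=3,
    learning_rate=1e-5,
    per_device_train_batch_size=8,
    weight_decay=0.01,
)

# trainer = Seq2SeqTrainer(model=model, args=args,
#                          train_dataset=tokenized_spider_train)  # preprocessing not shown
# trainer.train()
```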

Usage:

```python
from typing import List

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("upadhyay/sql")
model = AutoModelForSeq2SeqLM.from_pretrained("upadhyay/sql")


def prepare_input(question: str, table: List[str]):
    # Serialize the question and the table's column names into one prompt string.
    table_prefix = "table:"
    question_prefix = "question:"
    join_table = ",".join(table)
    inputs = f"{question_prefix} {question} {table_prefix} {join_table}"
    input_ids = tokenizer(inputs, max_length=700, return_tensors="pt").input_ids
    return input_ids


def inference(question: str, table: List[str]) -> str:
    # Generate a SQL query with beam search and decode it back to text.
    input_data = prepare_input(question=question, table=table)
    input_data = input_data.to(model.device)
    outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=700)
    result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True)
    return result


print(inference(question="what is salary?", table=["id", "name", "age"]))
```
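
The helper serializes the question and the table's column names into a single string before tokenization, so the call above feeds the model the input `question: what is salary? table: id,name,age`. To query a different schema, pass your own column names; the names below are purely illustrative:

```python
# Column names here are illustrative; substitute your own table's schema.
print(inference(question="how many employees are older than 30?",
                table=["id", "name", "age", "salary"]))
```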
