Rename Readme.md to README.md
README.md
ADDED
This model is based on the Llama-2 7B model provided by Meta. It accepts a natural-language question together with a table schema and returns a SQL query. It has been fine-tuned from "NousResearch/Llama-2-7b-hf".

```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="ekshat/Llama-2-7b-chat-finetune-for-text2sql")

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
model = AutoModelForCausalLM.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")

# Run the text-generation pipeline with our fine-tuned model
context = "CREATE TABLE Student (name VARCHAR, college VARCHAR, age VARCHAR, group VARCHAR, marks VARCHAR)"
question = "List the names of the students who belong to the school 'St. Xavier' and have marks greater than '600'"

prompt = f"""Below is a context that describes a SQL query, paired with a question that provides further information. Write an answer that appropriately completes the request.
### Context:
{context}
### Question:
{question}
### Answer:"""

sequences = pipe(
    prompt,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=200,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```
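Because a text-generation pipeline returns the prompt together with the completion, you will usually want to cut out just the SQL statement. Below is a minimal sketch of one way to do that, assuming the model continues the prompt after the `### Answer:` marker; the helper name `extract_sql` is illustrative, not part of the model card:

```python
def extract_sql(generated_text: str) -> str:
    # Everything after the final "### Answer:" marker is the model's SQL;
    # stop at the first blank line in case the model keeps generating.
    answer = generated_text.rsplit("### Answer:", 1)[-1]
    return answer.strip().split("\n\n")[0].strip()

for seq in sequences:
    print(extract_sql(seq["generated_text"]))
```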
|
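The full-precision 7B checkpoint needs roughly 14 GB of memory, so on smaller GPUs you may prefer to load it quantized. A minimal sketch, assuming the optional `bitsandbytes` package and a CUDA GPU are available; the 4-bit settings shown are common illustrative defaults, not a configuration stated in the model card:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit NF4 quantization; requires the bitsandbytes package and a CUDA GPU
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
model = AutoModelForCausalLM.from_pretrained(
    "ekshat/Llama-2-7b-chat-finetune-for-text2sql",
    quantization_config=bnb_config,
    device_map="auto",  # place layers on the available GPU(s)
)
```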
Readme.md
DELETED
File without changes