ruslanmv committed on
Commit
6af5ebe
·
verified ·
1 Parent(s): 944c3a5

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +39 -8
README.md CHANGED
@@ -62,18 +62,49 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
62
  model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
63
  tokenizer = AutoTokenizer.from_pretrained(model_name)
64
 
65
- # Example usage
66
- input_text = "Recupera il conteggio di tutte le righe nella tabella table1"
67
- inputs = tokenizer(input_text, return_tensors="pt").to(device)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
- # Generate output text
70
- outputs = model.generate(**inputs, max_length=50)
71
 
72
- # Decode and print the generated text
73
- generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
74
- print(generated_text)
75
  ```
 
76
 
 
 
 
77
  ### Model Features
78
 
79
  - **Text Generation**: This model is fine-tuned to generate coherent and contextually accurate text based on the provided input.
 
62
  model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
63
  tokenizer = AutoTokenizer.from_pretrained(model_name)
64
 
65
+ # The tokenizer is initialized above (adjust the model name as needed)
66
+ # Define EOS token for terminating the sequences
67
+ EOS_TOKEN = tokenizer.eos_token
68
+
69
+ # Define Alpaca-style prompt template
70
+ alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
71
+
72
+ ### Instruction:
73
+ {}
74
+
75
+ ### Input:
76
+ {}
77
+
78
+ ### Response:
79
+ """
80
+
81
+ # Format the prompt without the response part
82
+ prompt = alpaca_prompt.format(
83
+ "Provide the SQL query",
84
+ "Seleziona tutte le colonne della tabella table1 dove la colonna anni è uguale a 2020"
85
+ )
86
+ # Tokenize the prompt and generate text
87
+ inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
88
+ outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)
89
+
90
+ # Decode the generated text
91
+ generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
92
+
93
+ # Extract the generated response only (remove the prompt part)
94
+ response_start = generated_text.find("### Response:") + len("### Response:\n")
95
+ response = generated_text[response_start:].strip()
96
+
97
+ # Print the response (excluding the prompt)
98
+ print(response)
99
+
100
 
 
 
101
 
 
 
 
102
  ```
103
+ and the generated answer is:
104
 
105
+ ```
106
+ SELECT * FROM table1 WHERE anni = 2020
107
+ ```
108
  ### Model Features
109
 
110
  - **Text Generation**: This model is fine-tuned to generate coherent and contextually accurate text based on the provided input.