edumunozsala committed on
Commit 2b9d925
1 Parent(s): 43d3864

Upload README.md

Files changed (1)
  1. README.md +8 -7
README.md CHANGED
@@ -81,22 +81,24 @@ The following `bitsandbytes` quantization config was used during training:
  import torch
  from transformers import AutoModelForCausalLM, AutoTokenizer

- model_id = "mrm8488/llama-2-coder-7b"
+ model_id = "edumunozsala/llama-2-7b-int4-python-code-20k"

- tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer = AutoTokenizer.from_pretrained(hf_model_repo)

- model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda")
+ model = AutoModelForCausalLM.from_pretrained(hf_model_repo, load_in_4bit=True, torch_dtype=torch.float16,
+                                              device_map=device_map)

- sample = dataset[randrange(len(dataset))]
+ instruction="Write a Python function to display the first and last elements of a list."
+ input=""

  prompt = f"""### Instruction:
  Use the Task below and the Input given to write the Response, which is a programming code that can solve the Task.

  ### Task:
- {sample['instruction']}
+ {instruction}

  ### Input:
- {sample['input']}
+ {input}

  ### Response:
  """
@@ -107,7 +109,6 @@ outputs = model.generate(input_ids=input_ids, max_new_tokens=100, do_sample=True

  print(f"Prompt:\n{prompt}\n")
  print(f"Generated instruction:\n{tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}")
- print(f"Ground truth:\n{sample['output']}")

  ```
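For context, a minimal sketch of what the updated README example would look like end to end is shown below. The diff references `hf_model_repo` and `device_map` without showing their definitions, and the `model.generate(...)` line is truncated in the second hunk header, so the values assigned here (`device_map = "auto"`, the closing of the `generate` call) are assumptions for illustration rather than the exact README content.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed values: the diff uses hf_model_repo and device_map but does not
# show where they are defined, so these are illustrative stand-ins.
hf_model_repo = "edumunozsala/llama-2-7b-int4-python-code-20k"
device_map = "auto"

tokenizer = AutoTokenizer.from_pretrained(hf_model_repo)

# Load the fine-tuned model in 4-bit, as in the updated snippet.
model = AutoModelForCausalLM.from_pretrained(hf_model_repo, load_in_4bit=True,
                                             torch_dtype=torch.float16,
                                             device_map=device_map)

instruction = "Write a Python function to display the first and last elements of a list."
input = ""

prompt = f"""### Instruction:
Use the Task below and the Input given to write the Response, which is a programming code that can solve the Task.

### Task:
{instruction}

### Input:
{input}

### Response:
"""

# Generation step taken from the second hunk's context line; the original
# call is truncated in the diff, so only the arguments shown there are used.
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
outputs = model.generate(input_ids=input_ids, max_new_tokens=100, do_sample=True)

print(f"Prompt:\n{prompt}\n")
print(f"Generated instruction:\n{tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}")
```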