mrm8488 committed on
Commit
7ef22d4
•
1 Parent(s): 73180e6

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +17 -6
README.md CHANGED
@@ -3,6 +3,7 @@ tags:
3
  - generated_from_trainer
4
  - code
5
  - coding
 
6
  model-index:
7
  - name: FalCoder
8
  results: []
@@ -27,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->
27
 
28
  ## Model description 🧠
29
 
30
- [Llama-2](https://huggingface.co/tiiuae/falcon-7b)
31
 
32
 
33
  ## Training and evaluation data 📚
@@ -62,6 +63,11 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
62
 
63
  model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda")
64
 
 
 
 
 
 
65
  def generate(
66
  instruction,
67
  max_new_tokens=128,
@@ -69,9 +75,9 @@ def generate(
69
  top_p=0.75,
70
  top_k=40,
71
  num_beams=4,
72
- **kwargs
73
  ):
74
- prompt = instruction + "\n### Solution:\n"
75
  print(prompt)
76
  inputs = tokenizer(prompt, return_tensors="pt")
77
  input_ids = inputs["input_ids"].to("cuda")
@@ -97,9 +103,14 @@ def generate(
97
  output = tokenizer.decode(s)
98
  return output.split("### Solution:")[1].lstrip("\n")
99
 
100
- instruction = "Design a class for representing a person in Python."
 
 
 
 
 
 
101
  print(generate(instruction))
102
  ```
103
 
104
- ### Citation
105
-
 
3
  - generated_from_trainer
4
  - code
5
  - coding
6
+ - llama
7
  model-index:
8
  - name: FalCoder
9
  results: []
 
28
 
29
  ## Model description 🧠
30
 
31
+ [Llama-2](https://huggingface.co/meta-llama/Llama-2-7b)
32
 
33
 
34
  ## Training and evaluation data 📚
 
63
 
64
  model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda")
65
 
66
+ def create_prompt(instruction):
67
+ system = "You are a coding assistant that will help the user to resolve the following instruction:"
68
+ instruction = "### Instruction: " + instruction
69
+ return system + "\n" + instruction + "\n\n" + "### Solution:" + "\n"
70
+
71
  def generate(
72
  instruction,
73
  max_new_tokens=128,
 
75
  top_p=0.75,
76
  top_k=40,
77
  num_beams=4,
78
+ **kwargs,
79
  ):
80
+ prompt = create_prompt(instruction)
81
  print(prompt)
82
  inputs = tokenizer(prompt, return_tensors="pt")
83
  input_ids = inputs["input_ids"].to("cuda")
 
103
  output = tokenizer.decode(s)
104
  return output.split("### Solution:")[1].lstrip("\n")
105
 
106
+ instruction = """
107
+ Edit the following XML code to add a navigation bar to the top of a web page
108
+ <html>
109
+ <head>
110
+ <title>CliBrAIn</title>
111
+ </head>
112
+ """
113
  print(generate(instruction))
114
  ```
115
 
116
+ ### Citation