Q-bert committed on
Commit
3a40793
1 Parent(s): d34e613

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +32 -2
README.md CHANGED
@@ -30,8 +30,7 @@ You can utilize AlpaGo to perform natural language processing tasks. Here's an e
30
  ```python
31
  from peft import PeftModel
32
  import torch
33
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
34
-
35
  model_id = "EleutherAI/gpt-neox-20b"
36
  tokenizer = AutoTokenizer.from_pretrained(model_id)
37
  bnb_config = BitsAndBytesConfig(
@@ -42,4 +41,35 @@ bnb_config = BitsAndBytesConfig(
42
  )
43
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"":0})
44
  model = PeftModel.from_pretrained(model, "myzens/AlpaGo")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  ```
 
30
  ```python
31
  from peft import PeftModel
32
  import torch
33
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, GenerationConfig
 
34
  model_id = "EleutherAI/gpt-neox-20b"
35
  tokenizer = AutoTokenizer.from_pretrained(model_id)
36
  bnb_config = BitsAndBytesConfig(
 
41
  )
42
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"":0})
43
  model = PeftModel.from_pretrained(model, "myzens/AlpaGo")
44
+
45
+ # You can change the prompt here.
46
+ PROMPT = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
47
+ ### Instruction:
48
+ Write a short story about a lost key that unlocks a mysterious door.
49
+ ### Response:"""
50
+
51
+ inputs = tokenizer(PROMPT, return_tensors="pt")
52
+ input_ids = inputs["input_ids"].cuda()
53
+
54
+ generation_config = GenerationConfig(
55
+ temperature=0.6,
56
+ top_p=0.95,
57
+ repetition_penalty=1.15,
58
+
59
+ )
60
+
61
+ print("Generating...")
62
+ generation_output = model.generate(
63
+ input_ids=input_ids,
64
+ generation_config=generation_config,
65
+ return_dict_in_generate=True,
66
+ output_scores=True,
67
+ max_new_tokens=256,
68
+ eos_token_id=tokenizer.eos_token_id,
69
+ pad_token_id=tokenizer.pad_token_id,
70
+ )
71
+
72
+ for s in generation_output.sequences:
73
+ print(tokenizer.decode(s))
74
+
75
  ```