STEM-AI-mtl committed
Commit: 3356adb
Parent: 49b4292

Update README.md

Files changed (1):
  README.md +0 -56
README.md CHANGED
@@ -44,62 +44,6 @@ Refer to [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) model card fo
 
 [GPTQ format](https://github.com/STEM-ai/Phi-2/blob/ab1ced8d7922765344d824acf1924df99606b4fc/chat-GPTQ.py)
 
-### How to use
-```python
-import torch
-from peft import PeftModel, PeftConfig
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import warnings
-import os
-
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
-warnings.filterwarnings("ignore", category=UserWarning, module='transformers.generation.utils')
-
-def load_model_and_tokenizer():
-    base_model = "microsoft/phi-2"
-    peft_model_id = "STEM-AI-mtl/phi-2-electrical-engineering"
-    config = PeftConfig.from_pretrained(peft_model_id, trust_remote_code=True)
-    model = AutoModelForCausalLM.from_pretrained(base_model, device_map="cuda:0", return_dict=True, trust_remote_code=True)
-
-    model = model.to('cuda')
-
-    tokenizer = AutoTokenizer.from_pretrained(base_model)
-    model = PeftModel.from_pretrained(model, peft_model_id, trust_remote_code=True)
-
-    model = model.to('cuda')
-
-    return model, tokenizer
-
-def generate(instruction, model, tokenizer):
-    inputs = tokenizer(instruction, return_tensors="pt", return_attention_mask=False)
-    inputs = inputs.to('cuda')
-    outputs = model.generate(
-        **inputs,
-        max_length=350,
-        do_sample=True,
-        temperature=0.7,
-        top_k=50,
-        top_p=0.9,
-        repetition_penalty=1,
-    )
-    text = tokenizer.batch_decode(outputs)[0]
-    return text
-
-
-if __name__ == '__main__':
-    model, tokenizer = load_model_and_tokenizer()
-    while True:
-        instruction = input("Enter your instruction: ")
-        if not instruction:
-            continue
-        if instruction.lower() in ["exit", "quit", "exit()", "quit()"]:
-            print("Exiting...")
-            break
-
-        answer = generate(instruction, model, tokenizer)
-        print(f'Answer: {answer}')
-```
-
 ## Training Details
 
 ### Training Data
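The snippet removed above does run, but it loads a `PeftConfig` it never uses, calls `.to('cuda')` twice on a model that `device_map="cuda:0"` has already placed on the GPU, and passes `repetition_penalty=1`, which is the `transformers` default and therefore a no-op. For anyone who still wants the deleted usage example, here is a tightened sketch of the same flow. It keeps the model IDs and sampling settings from the removed code; the demo prompt and the `skip_special_tokens=True` decode option are assumptions added here, not part of the original.

```python
# Tightened sketch of the "How to use" block removed in this commit.
# Assumes the same peft/transformers APIs as the deleted code; the demo
# prompt at the bottom is hypothetical.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE_MODEL = "microsoft/phi-2"
ADAPTER_ID = "STEM-AI-mtl/phi-2-electrical-engineering"

# device_map="cuda:0" already puts the weights on the GPU, so the
# original's extra .to('cuda') calls are unnecessary.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL, device_map="cuda:0", trust_remote_code=True
)
model = PeftModel.from_pretrained(model, ADAPTER_ID)

def generate(instruction: str) -> str:
    # Unlike the removed code, the attention mask is kept here (the
    # original passed return_attention_mask=False), which avoids a
    # generate() warning about missing masks.
    inputs = tokenizer(instruction, return_tensors="pt").to("cuda")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=350,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.9,
        )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

if __name__ == "__main__":
    # Hypothetical prompt; replace with your own instruction.
    print(generate("Explain the purpose of a pull-up resistor."))
```

Dropping `repetition_penalty=1` does not change the sampling behavior, since 1.0 is the default value in `transformers`.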
 