kr-manish commited on
Commit
8dfdbdc
1 Parent(s): 45ed10f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +36 -18
README.md CHANGED
@@ -22,24 +22,42 @@ This model was trained using AutoTrain. For more information, please visit [Auto
22
 
23
  from transformers import AutoModelForCausalLM, AutoTokenizer
24
 
25
- model_path = "PATH_TO_THIS_REPO"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
 
27
  tokenizer = AutoTokenizer.from_pretrained(model_path)
28
- model = AutoModelForCausalLM.from_pretrained(
29
- model_path,
30
- device_map="auto",
31
- torch_dtype='auto'
32
- ).eval()
33
-
34
- # Prompt content: "hi"
35
- messages = [
36
- {"role": "user", "content": "hi"}
37
- ]
38
-
39
- input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
40
- output_ids = model.generate(input_ids.to('cuda'))
41
- response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
42
-
43
- # Model response: "Hello! How can I assist you today?"
44
- print(response)
45
  ```
 
22
 
23
  from transformers import AutoModelForCausalLM, AutoTokenizer
24
 
25
+ model_path = "kr-manish/Mistral-7B-autotrain-finetune-QA-vx"
26
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
27
+ model = AutoModelForCausalLM.from_pretrained(model_path)
28
+
29
+ input_text = "What is the scientific name of the honey bee?"
30
+ # Tokenize input text
31
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
32
+
33
+ # Generate output text
34
+ output = model.generate(input_ids, max_length=100, num_return_sequences=1, do_sample=True)
35
+
36
+ # Decode and print output
37
+ generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
38
+ print(generated_text)
39
+
40
+ # Apis mellifera.
41
+ ```
42
+
43
+ # Usage
44
+
45
+ ```python
46
+
47
+ from transformers import AutoModelForCausalLM, AutoTokenizer
48
 
49
+ model_path = "kr-manish/Mistral-7B-autotrain-finetune-QA-vx"
50
  tokenizer = AutoTokenizer.from_pretrained(model_path)
51
+ model = AutoModelForCausalLM.from_pretrained(model_path)
52
+
53
+ # Generate response
54
+ input_text = "Give three tips for staying healthy."
55
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
56
+ output = model.generate(input_ids, max_new_tokens = 200)
57
+ predicted_text = tokenizer.decode(output[0], skip_special_tokens=True)
58
+ print(predicted_text)
59
+
60
+ # Give three tips for staying healthy. [/INST] 1. Eat a balanced diet: Make sure to include plenty of fruits, vegetables, lean proteins, and whole grains in your diet. Avoid processed foods and sugary drinks.
61
+ # 2. Exercise regularly: Aim for at least 30 minutes of moderate exercise every day, such as walking, cycling, or swimming.
62
+ # 3. Get enough sleep: Aim for 7-9 hours of quality sleep each night. Make sure to create a relaxing bedtime routine and stick to a regular sleep schedule.
 
 
 
 
 
63
  ```