fionazhang committed
Commit
962c452
1 Parent(s): ba2832b

Update README.md

Files changed (1): README.md +23 -11
README.md CHANGED
@@ -34,23 +34,35 @@ This repository includes the weights learned during the training process. It sho
 
 <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
 ```python
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 # Load the tokenizer, adjust configuration if needed
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Load the fine-tuned model with its trained weights
-fine_tuned_model = AutoModelForSequenceClassification.from_pretrained(
-    'fionazhang/mistral_7b_environment',
+# Text generation
+def generate_text_sequences(pipe, prompt):
+    sequences = pipe(
+        prompt,
+        do_sample=True,
+        max_new_tokens=100,
+        temperature=0.8,
+        top_k=50,
+        top_p=0.95,
+        num_return_sequences=1,
+    )
+    return sequences[0]['generated_text']
+
+# Now you can use the model for inference
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    pad_token_id=2
 )
-
-# Now you can use `fine_tuned_model` for inference or further training
-input_text = "The impact of climate change on"
-output_text = fine_tuned_model.generate(tokenizer.encode(input_text, return_tensors="pt"))
-
-print(tokenizer.decode(output_text[0], skip_special_tokens=True))
-
+print(generate_text_sequences(pipe, "your prompt"))
 ```
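For reference, here is the updated example as one self-contained, runnable script. This is a minimal sketch rather than part of the commit itself: the checkpoint name is assumed from the removed lines (`fionazhang/mistral_7b_environment`), the prompt reuses the old snippet's example string, and everything else mirrors the `+` lines above.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Assumed from the removed lines; the diff itself leaves `model_name` undefined
model_name = "fionazhang/mistral_7b_environment"

# Load the tokenizer and the fine-tuned weights from the Hub
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Build a text-generation pipeline; bfloat16 and device_map="auto" as in the diff.
# pad_token_id=2 is </s> in Mistral's tokenizer, used here as the padding token.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    pad_token_id=2,
)

def generate_text_sequences(pipe, prompt):
    # Sample a single continuation with the decoding settings from the diff
    sequences = pipe(
        prompt,
        do_sample=True,
        max_new_tokens=100,
        temperature=0.8,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
    )
    return sequences[0]["generated_text"]

# Prompt reused from the removed example
print(generate_text_sequences(pipe, "The impact of climate change on"))
```

Routing the sampling arguments through the pipeline call, rather than calling `model.generate` directly, keeps tokenization, generation, and decoding in a single step, which is what this commit swaps the old encode/generate/decode sequence for.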