dshin committed on
Commit
8fc6b16
1 Parent(s): d44c340

Push model using huggingface_hub.

Files changed (2):
  1. README.md +3 -3
  2. pytorch_model.bin +1 -1
README.md CHANGED
@@ -24,7 +24,7 @@ You can then generate text as follows:
  ```python
  from transformers import pipeline
 
- generator = pipeline("text-generation", model="dshin//tmp/tmp8hrea3qg/dshin/flan-t5-ppo")
+ generator = pipeline("text-generation", model="dshin//tmp/tmp3l_wgjg2/dshin/flan-t5-ppo")
  outputs = generator("Hello, my llama is cute")
  ```
 
@@ -34,8 +34,8 @@ If you want to use the model for training or to obtain the outputs from the value head
  from transformers import AutoTokenizer
  from trl import AutoModelForCausalLMWithValueHead
 
- tokenizer = AutoTokenizer.from_pretrained("dshin//tmp/tmp8hrea3qg/dshin/flan-t5-ppo")
- model = AutoModelForCausalLMWithValueHead.from_pretrained("dshin//tmp/tmp8hrea3qg/dshin/flan-t5-ppo")
+ tokenizer = AutoTokenizer.from_pretrained("dshin//tmp/tmp3l_wgjg2/dshin/flan-t5-ppo")
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("dshin//tmp/tmp3l_wgjg2/dshin/flan-t5-ppo")
 
  inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
  outputs = model(**inputs, labels=inputs["input_ids"])
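The README snippet in the second hunk stops at the forward call. As a minimal sketch (not part of this commit), the value-head outputs could be unpacked as below; the shortened repo id `dshin/flan-t5-ppo` and the `(lm_logits, loss, value)` return layout are assumptions about the pushed model and TRL's value-head forward pass, not something the diff states.

```python
# Hypothetical usage sketch, not taken from the commit.
# Assumptions: the model is reachable at the Hub repo id "dshin/flan-t5-ppo",
# and TRL's value-head forward returns the tuple (lm_logits, loss, value).
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

tokenizer = AutoTokenizer.from_pretrained("dshin/flan-t5-ppo")
model = AutoModelForCausalLMWithValueHead.from_pretrained("dshin/flan-t5-ppo")

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
lm_logits, loss, value = model(**inputs, labels=inputs["input_ids"])

print(lm_logits.shape)  # (batch, seq_len, vocab_size) language-model logits
print(value.shape)      # (batch, seq_len) per-token value estimates from the value head
```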
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4b474855dd9791c0481d74899e2b79fd3ba73ee5a322ca972b1ea7af2e752938
+ oid sha256:9033f0f57a7cd4983b4b26826dc4f84862b6e4216c197f1c8215b7f38d871a18
  size 990412605