monsoon-nlp commited on
Commit
634ab27
1 Parent(s): 825838f

Fix README for this actual model

Browse files
Files changed (1) hide show
  1. README.md +4 -10
README.md CHANGED
@@ -1,7 +1,7 @@
1
  ---
2
  license: mit
3
  datasets:
4
- - monsoon-nlp/greenbeing-proteins
5
  ---
6
 
7
  Adapter model / weights only for https://huggingface.co/monsoon-nlp/nyc-savvy-llama2-7b
@@ -13,13 +13,7 @@ from peft import AutoPeftModelForCausalLM
13
  from transformers import AutoTokenizer
14
 
15
  # this model
16
- model = AutoPeftModelForCausalLM.from_pretrained("monsoon-nlp/tinyllama-mixpretrain-uniprottune").to("cuda")
17
  # base model for the tokenizer
18
- tokenizer = AutoTokenizer.from_pretrained("monsoon-nlp/tinyllama-mixpretrain-quinoa-sciphi")
19
-
20
- inputs = tokenizer("<sequence> Subcellular locations:", return_tensors="pt")
21
- outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50)
22
- print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
23
- ```
24
-
25
- Notebook: https://colab.research.google.com/drive/1UTavcVpqWkp4C_GkkS_HxDQ0Orpw43iu?usp=sharing
 
1
  ---
2
  license: mit
3
  datasets:
4
+ - monsoon-nlp/asknyc-chatassistant-format
5
  ---
6
 
7
  Adapter model / weights only for https://huggingface.co/monsoon-nlp/nyc-savvy-llama2-7b
 
13
  from transformers import AutoTokenizer
14
 
15
  # this model
16
+ model = AutoPeftModelForCausalLM.from_pretrained("monsoon-nlp/nyc-savvy-llama2-7b-lora-adapter").to("cuda")
17
  # base model for the tokenizer
18
+ tokenizer = AutoTokenizer.from_pretrained("monsoon-nlp/nyc-savvy-llama2-7b")
19
+ ```