furquan committed
Commit
847b0e8
1 Parent(s): 50116f8

Update app.py

Files changed (1): app.py +5 -2
app.py CHANGED
@@ -2,17 +2,20 @@ import gradio as gr
 import torch
 
 from transformers import pipeline, AutoTokenizer, AutoModel, LlamaForCausalLM
+from peft import PeftModel
 
 #pipe = pipeline("text-generation", model="furquan/opt_2_7_b_prompt_tuned_sentiment_analysis", trust_remote_code=True, cache_dir="/local/home/furquanh/myProjects/week12/").to('cuda')
 
 # tokenizer = AutoTokenizer.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
 # model = AutoModel.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
-model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token="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")
 tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
 
+model = PeftModel.from_pretrained(model, "furquan/llama2-sentiment-prompt-tuned")
+
 
 title = "OPT-1.3B"
-description = "This demo uses meta's LLama-2-7b Causal LM as base model that was prompt tuned on the Stanford Sentiment Treebank-5 way dataset to only output the sentiment of a given text."
+description = "This demo uses meta's LLama-2-7b Causal LM as base model that was prompt tuned on the mteb/tweet_sentiment_extraction dataset to only output the sentiment of a given text."
 article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2104.08691.pdf' target='_blank'>The Power of Scale for Parameter-Efficient Prompt Tuning</a></p>"
 
 
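For reference, a minimal sketch of how the prompt-tuned model loaded in this commit might be wired into the Gradio demo. Only the four load lines come from the diff; the classify helper, the generation settings, and the Interface wiring are illustrative assumptions, not part of the commit (the access token is omitted here; the gated Llama-2 checkpoint requires one).

import gradio as gr
import torch
from transformers import AutoTokenizer, LlamaForCausalLM
from peft import PeftModel

# Load the base model and attach the prompt-tuned adapter, as in the diff above.
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
model = PeftModel.from_pretrained(model, "furquan/llama2-sentiment-prompt-tuned")
model.eval()

def classify(text):
    # Hypothetical inference path: with prompt tuning, PEFT prepends the learned
    # soft-prompt embeddings automatically, so a plain generate() call suffices.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=5)
    # Keep only the newly generated tokens, i.e. the predicted sentiment label.
    return tokenizer.decode(output_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True)

demo = gr.Interface(fn=classify, inputs="text", outputs="text")
demo.launch()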