File size: 2,547 Bytes
e92d4e6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
model-index:
- name: LLAMA 7B Sentiment Analysis Adapter
  results:
    - task:
        name: Sentiment Analysis
        type: text-classification
      dataset:
        name: Amazon Sentiment Review dataset
        type: amazon_reviews
model-metadata:
  license: apache-2.0
  library_name: transformers
  tags: ["text-classification", "sentiment-analysis", "English"]
  language: ["en"]
  widget:
    - text: "I love using FuturixAI for my daily tasks!"

intended-use:
  primary-uses:
    - This model is intended for sentiment analysis on English language text.
  primary-users:
    - Researchers
    - Social media monitoring tools
    - Customer feedback analysis systems

training-data:
  training-data-source: Amazon Sentiment Review dataset

quantitative-analyses:
  use-cases-limitations:
    - The model may perform poorly on texts that contain a lot of slang or are in a different language than it was trained on.

ethical-considerations:
  risks-and-mitigations:
    - There is a risk of the model reinforcing or creating biases based on the training data. Users should be aware of this and consider additional bias mitigation strategies when using the model.

model-architecture:
  architecture: LLAMA 7B with LORA adaptation
  library: PeftModel

how-to-use:
  installation:
    - pip install transformers peft
  code-examples:
    - |
      ```python
      import transformers
      from peft import PeftModel

      model_name = "meta-llama/Llama-2-7b"  # you can use VICUNA 7B model as well
      peft_model_id = "Futurix-AI/LLAMA_7B_Sentiment_Analysis_Amazon_Review_Dataset"

      tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
      model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
      model = PeftModel.from_pretrained(model, peft_model_id)

      prompt = """
      Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
      ###Instruction:
      Detect the sentiment of the tweet.
      ###Input:
      FuturixAI embodies the spirit of innovation, with a resolve to push the boundaries of what's possible through science and technology.
      ###Response:
      """

      inputs = tokenizer(prompt, return_tensors="pt")
      outputs = model.generate(**inputs, max_length=256, do_sample=True)
      text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
      print(text)
      ```