base_model: bardsai/jaskier-7b-dpo-v5.6
gate_mode: hidden
dtype: bfloat16
experts:
  - source_model: Weyaxi/OpenHermes-2.5-neural-chat-v3-3-Slerp
    positive_prompts:
      - "IT"
      - "programming"
      - "Science"
      - "Computing"
      - "Processing"
    negative_prompts:
      - "incorrect"
  - source_model: macadeliccc/MonarchLake-7B
    positive_prompts:
      - "Chat"
      - "Discuss"
    negative_prompts:
      - "fictional"
  - source_model: paulml/OmniBeagleSquaredMBX-v3-7B-v2
    positive_prompts:
      - "storywriting"
      - "fiction"
    negative_prompts:
      - "nonfiction"
  - source_model: louisbrulenaudet/Pearl-7B-slerp
    positive_prompts:
      - "Arithmetic"
    negative_prompts:
      - "fictional"
  - source_model: CultriX/NeuralTrix-7B-dpo
    positive_prompts:
      - "stimulating"
      - "interesting"
    negative_prompts:
      - "boring"
  - source_model: FelixChao/Capricorn-7B-DPO
    positive_prompts:
      - "business"
      - "finance"
    negative_prompts:
      - "irresponsible"
  - source_model: CultriX/NeuralTrix-7B-dpo
    positive_prompts:
      - "research"
      - "intriguing"
    negative_prompts:
      - "fictional"
  - source_model: louisbrulenaudet/Pearl-7B-slerp
    positive_prompts:
      - "algebra"
      - "calculus"
    negative_prompts:
      - "inaccurate"
  - source_model: openagi-project/OpenAGI-7B-v0.1
    positive_prompts:
      - "professional"
      - "business"
    negative_prompts:
      - "unprofessional"
  - source_model: FelixChao/Capricorn-7B-DPO
    positive_prompts:
      - "Conceptual"
      - "ideal"
    negative_prompts:
      - "pragmatic"
  - source_model: Weyaxi/OpenHermes-2.5-neural-chat-v3-3-Slerp
    positive_prompts:
      - "technology"
      - "tech"
    negative_prompts:
      - "archaic"
  - source_model: macadeliccc/MonarchLake-7B
    positive_prompts:
      - "pattern"
      - "recognition"
    negative_prompts:
      - "mismatch"
  - source_model: macadeliccc/MonarchLake-7B
    positive_prompts:
      - "positive"
      - "understanding"
    negative_prompts:
      - "safe"
  - source_model: bardsai/jaskier-7b-dpo-v5.6
    positive_prompts:
      - "precise"
      - "accurate"
    negative_prompts:
      - "inaccurate"
  - source_model: bardsai/jaskier-7b-dpo-v5.6
    positive_prompts:
      - "intellectual"
      - "efficient"
    negative_prompts:
      - "stupid"
  - source_model: macadeliccc/MonarchLake-7B
    positive_prompts:
      - "medical"
      - "illness"
    negative_prompts:
      - "poor practices"