refactor global vars

- .gitignore +1 -0
- app.py +12 -91
- globalvars.py +86 -0

.gitignore
ADDED
@@ -0,0 +1 @@
+.env

app.py
CHANGED
@@ -13,93 +13,9 @@ import os
import json
import numpy as np

-
-
-
-
-## Global Variables
-
-title = """
-# 👋🏻Welcome to 🙋🏻♂️Tonic's 📽️Nvidia 🛌🏻Embed V-1 !"""
-
-description = """
-You can use this Space to test out the current model [nvidia/NV-Embed-v1](https://huggingface.co/nvidia/NV-Embed-v1). 🐣a generalist embedding model that ranks No. 1 on the Massive Text Embedding Benchmark (MTEB benchmark)(as of May 24, 2024), with 56 tasks, encompassing retrieval, reranking, classification, clustering, and semantic textual similarity tasks.
-You can also use 📽️Nvidia 🛌🏻Embed V-1 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/NV-Embed?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
-Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟 [MultiTonic](https://github.com/MultiTonic) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
-"""
-
-tasks = {
-    'ClimateFEVER': 'Given a claim about climate change, retrieve documents that support or refute the claim',
-    'DBPedia': 'Given a query, retrieve relevant entity descriptions from DBPedia',
-    'FEVER': 'Given a claim, retrieve documents that support or refute the claim',
-    'FiQA2018': 'Given a financial question, retrieve user replies that best answer the question',
-    'HotpotQA': 'Given a multi-hop question, retrieve documents that can help answer the question',
-    'MSMARCO': 'Given a web search query, retrieve relevant passages that answer the query',
-    'NFCorpus': 'Given a question, retrieve relevant documents that best answer the question',
-    'NQ': 'Given a question, retrieve Wikipedia passages that answer the question',
-    'QuoraRetrieval': 'Given a question, retrieve questions that are semantically equivalent to the given question',
-    'SCIDOCS': 'Given a scientific paper title, retrieve paper abstracts that are cited by the given paper',
-}
-
-intention_prompt= """
-"type": "object",
-"properties": {
-"ClimateFEVER": {
-"type": "boolean",
-"description" : "select this for climate science related text"
-},
-"DBPedia": {
-"type": "boolean",
-"description" : "select this for encyclopedic related knowledge"
-},
-"FEVER": {
-"type": "boolean",
-"description": "select this to verify a claim or embed a claim"
-},
-"FiQA2018": {
-"type": "boolean",
-"description" : "select this for financial questions or topics"
-},
-"HotpotQA": {
-"type": "boolean",
-"description" : "select this for a multi-hop question or for texts that provide multihop claims"
-},
-"MSMARCO": {
-"type": "boolean",
-"description": "Given a web search query, retrieve relevant passages that answer the query"
-},
-"NFCorpus": {
-"type": "boolean",
-"description" : "Given a question, retrieve relevant documents that best answer the question"
-},
-"NQ": {
-"type": "boolean",
-"description" : "Given a question, retrieve Wikipedia passages that answer the question"
-},
-"QuoraRetrieval": {
-"type": "boolean",
-"description": "Given a question, retrieve questions that are semantically equivalent to the given question"
-},
-"SCIDOCS": {
-"type": "boolean",
-"description": "Given a scientific paper title, retrieve paper abstracts that are cited by the given paper"
-}
-},
-"required": [
-"ClimateFEVER",
-"DBPedia",
-"FEVER",
-"FiQA2018",
-"HotpotQA",
-"MSMARCO",
-"NFCorpus",
-"NQ",
-"QuoraRetrieval",
-"SCIDOCS",
-]
-produce a complete json schema."
-
-you will recieve a text , classify the text according to the schema above. ONLY PROVIDE THE FINAL JSON , DO NOT PRODUCE ANY ADDITION INSTRUCTION :"""
+import openai
+from openai import OpenAI
+from globalvars import API_BASE, API_KEY, intention_prompt

## add chroma vector store

@@ -112,10 +28,15 @@ model = AutoModel.from_pretrained('nvidia/NV-Embed-v1', trust_remote_code=True).

## Make intention Mapper

-
-
-
-
+intention_client = OpenAI(
+    api_key=API_KEY,
+    base_url=API_BASE
+)
+intention_completion = intention_client.chat.completions.create(
+    model="yi-large",
+    messages=[{"role": "system", "content": intention_prompt}, {"role": "user", "content": inputext}]
+)
+# print(completion)

def respond(
    message,
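As committed, the new hunk runs `intention_client.chat.completions.create(...)` at import time and passes `inputext`, which is not defined anywhere in app.py at that point. Below is a minimal sketch of how the same call could instead be wrapped in a function that receives the user text per request; the `classify_intention` name and the JSON-parsing step are illustrative assumptions, not part of this commit.

```python
# Hypothetical sketch (not from the commit): wrap the intention call in a function
# so the user text is passed in per request instead of referencing an undefined
# module-level `inputext`.
import json

from openai import OpenAI

from globalvars import API_BASE, API_KEY, intention_prompt

intention_client = OpenAI(api_key=API_KEY, base_url=API_BASE)


def classify_intention(user_text: str) -> dict:
    """Ask the yi-large intention mapper which retrieval task the text belongs to."""
    completion = intention_client.chat.completions.create(
        model="yi-large",
        messages=[
            {"role": "system", "content": intention_prompt},
            {"role": "user", "content": user_text},
        ],
    )
    # The prompt instructs the model to answer with JSON only; strip any code
    # fences it might add before parsing.
    reply = completion.choices[0].message.content.strip()
    reply = reply.removeprefix("```json").removeprefix("```").removesuffix("```").strip()
    return json.loads(reply)
```
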
globalvars.py
ADDED
@@ -0,0 +1,86 @@
+## Global Variables
+
+API_BASE = "https://api.01.ai/v1"
+API_KEY = "your key"
+
+title = """
+# 👋🏻Welcome to 🙋🏻♂️Tonic's 📽️Nvidia 🛌🏻Embed V-1 !"""
+
+description = """
+You can use this Space to test out the current model [nvidia/NV-Embed-v1](https://huggingface.co/nvidia/NV-Embed-v1). 🐣a generalist embedding model that ranks No. 1 on the Massive Text Embedding Benchmark (MTEB benchmark)(as of May 24, 2024), with 56 tasks, encompassing retrieval, reranking, classification, clustering, and semantic textual similarity tasks.
+You can also use 📽️Nvidia 🛌🏻Embed V-1 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/NV-Embed?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r059973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
+Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟 [MultiTonic](https://github.com/MultiTonic) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
+"""
+
+tasks = {
+    'ClimateFEVER': 'Given a claim about climate change, retrieve documents that support or refute the claim',
+    'DBPedia': 'Given a query, retrieve relevant entity descriptions from DBPedia',
+    'FEVER': 'Given a claim, retrieve documents that support or refute the claim',
+    'FiQA2018': 'Given a financial question, retrieve user replies that best answer the question',
+    'HotpotQA': 'Given a multi-hop question, retrieve documents that can help answer the question',
+    'MSMARCO': 'Given a web search query, retrieve relevant passages that answer the query',
+    'NFCorpus': 'Given a question, retrieve relevant documents that best answer the question',
+    'NQ': 'Given a question, retrieve Wikipedia passages that answer the question',
+    'QuoraRetrieval': 'Given a question, retrieve questions that are semantically equivalent to the given question',
+    'SCIDOCS': 'Given a scientific paper title, retrieve paper abstracts that are cited by the given paper',
+}
+
+intention_prompt= """
+"type": "object",
+"properties": {
+"ClimateFEVER": {
+"type": "boolean",
+"description" : "select this for climate science related text"
+},
+"DBPedia": {
+"type": "boolean",
+"description" : "select this for encyclopedic related knowledge"
+},
+"FEVER": {
+"type": "boolean",
+"description": "select this to verify a claim or embed a claim"
+},
+"FiQA2018": {
+"type": "boolean",
+"description" : "select this for financial questions or topics"
+},
+"HotpotQA": {
+"type": "boolean",
+"description" : "select this for a multi-hop question or for texts that provide multihop claims"
+},
+"MSMARCO": {
+"type": "boolean",
+"description": "Given a web search query, retrieve relevant passages that answer the query"
+},
+"NFCorpus": {
+"type": "boolean",
+"description" : "Given a question, retrieve relevant documents that best answer the question"
+},
+"NQ": {
+"type": "boolean",
+"description" : "Given a question, retrieve Wikipedia passages that answer the question"
+},
+"QuoraRetrieval": {
+"type": "boolean",
+"description": "Given a question, retrieve questions that are semantically equivalent to the given question"
+},
+"SCIDOCS": {
+"type": "boolean",
+"description": "Given a scientific paper title, retrieve paper abstracts that are cited by the given paper"
+}
+},
+"required": [
+"ClimateFEVER",
+"DBPedia",
+"FEVER",
+"FiQA2018",
+"HotpotQA",
+"MSMARCO",
+"NFCorpus",
+"NQ",
+"QuoraRetrieval",
+"SCIDOCS",
+]
+produce a complete json schema."
+
+you will recieve a text , classify the text according to the schema above. ONLY PROVIDE THE FINAL JSON , DO NOT PRODUCE ANY ADDITION INSTRUCTION :"""
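
Since the same commit adds `.env` to `.gitignore`, the credentials could be read from the environment rather than left as the hard-coded `"your key"` placeholder in globalvars.py. A hedged sketch follows; using `python-dotenv` is an assumption for illustration and is not declared anywhere in this commit.

```python
# Hypothetical alternative (not in the commit): keep the secret in the .env file
# that this commit tells git to ignore, and read it at startup.
import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()  # loads variables from a local .env file, if one exists

API_BASE = os.getenv("API_BASE", "https://api.01.ai/v1")
API_KEY = os.getenv("API_KEY")  # no hard-coded fallback for the secret

if not API_KEY:
    raise RuntimeError("API_KEY is not set; add it to .env or export it in the environment")
```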