Space status: Paused

Commit c36717c, committed by zhenyundeng
Parent(s): a851bc3

update

Files changed:
- app.py +49 -1
- requirements.txt +25 -3
app.py CHANGED

@@ -1,13 +1,61 @@
 from fastapi import FastAPI
 import uvicorn
 import spaces
+import torch
+from pydantic import BaseModel
+from transformers import RobertaTokenizer, RobertaForSequenceClassification
+
+if torch.cuda.is_available():
+    tokenizer = RobertaTokenizer.from_pretrained('Dzeniks/roberta-fact-check')
+    fc_model = RobertaForSequenceClassification.from_pretrained('Dzeniks/roberta-fact-check')
 
 app = FastAPI()
 
+# ------------------------------------------------------------------------
+class Item(BaseModel):
+    claim: str
+    evidence: str
+
+
+@app.post("/predict/")
+@spaces.GPU
+def fact_checking(item: Item):
+    # # claim = item['claim']
+    # # source = item['source']
+    # claim = item.claim
+    # source = item.source
+
+    claim = item.claim
+    evidence = item.evidence
+    # claim = item['claim']
+    # evidence = item['evidence']
+
+    input = tokenizer.encode_plus(claim, evidence, return_tensors="pt")
+    fc_model.eval()
+    with torch.no_grad():
+        outputs = fc_model(**input)
+
+    label = torch.argmax(outputs[0]).item()
+
+    return {"Verdict": label}
+
+
 @app.get("/")
+@spaces.GPU
 def greet_json():
     return {"Hello": "World!"}
 
+
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
-
+
+
+# if __name__ == "__main__":
+#     item = {
+#         "claim": "Albert Einstein work in the field of computer science.",
+#         "evidence": "Albert Einstein was a German-born theoretical physicist, widely acknowledged to be one of the greatest and most influential physicists of all time.",
+#     }
+#
+#     results = fact_checking(item)
+#
+#     print(results)
requirements.txt CHANGED

@@ -1,3 +1,25 @@
-
-
-
+gradio
+nltk
+rank_bm25
+accelerate
+trafilatura
+spacy
+pytorch_lightning
+transformers==4.29.2
+SentencePiece
+datasets
+leven
+scikit-learn
+pexpect
+elasticsearch
+torch
+huggingface_hub
+google-api-python-client
+wikipedia-api
+beautifulsoup4
+azure-storage-file-share
+azure-storage-blob
+bm25s
+PyStemmer
+lxml_html_clean
+spaces