Upload 2 files
- app.py +76 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,76 @@
import streamlit as st

text = st.text_input('Your news is: ', max_chars=200, help='maximum 200 characters')

# Load the fake-news classifier
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import numpy as np

tokenizer = AutoTokenizer.from_pretrained("Shiiirley/fake_news_detector")
model = AutoModelForSequenceClassification.from_pretrained("Shiiirley/fake_news_detector")

# Step 1: the user enters a news item

# text = "May 20, 2024 - politicsNews - Donald Trump is eating taco at HKUST right now, he is satisfied with the food and would like to come again. He was especially happy about the taco sauce, but thought it would taste better if it had pickles mixed with No. 46 concrete. He also suggested that if Australians want to deal with kangaroos' military attack on their country, they can each be given a taco newly developed by HKUST. The purple moonbeams danced on the sparkling waves, as the giggling unicorns pranced through the fields of cotton candy, while the rainbow-colored butterflies fluttered their wings in delight."

# Wait for input; otherwise every model below runs on an empty string at first render
if not text:
    st.stop()

inputs = tokenizer(text,
                   padding=True,
                   truncation=True,
                   return_tensors='pt')

outputs = model(**inputs)

# Turn logits into class probabilities (printed to the server log)
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
predictions = predictions.detach().cpu().numpy()
print(f"The predicted class is {np.argmax(predictions)}")
print(f"Class 0={predictions[0,0]:.4f}, Class 1={predictions[0,1]:.4f}")

# Class 0 is the "fake" class; flag anything with fake probability >= 0.4
judge = "real"
if predictions[0,0] >= 0.4:
    judge = "fake"

readout = "This news is probably a " + judge + f" one. The fake probability is {100*predictions[0,0]:.4f}%."

# Summarize the input with a T5 model fine-tuned on news
# (AutoModelForSeq2SeqLM replaces the deprecated AutoModelWithLMHead)
from transformers import AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-summarize-news", use_fast=False)
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-summarize-news")

def summarize(text, max_length=150):
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)

    generated_ids = model.generate(input_ids=input_ids, num_beams=2, max_length=max_length,
                                   repetition_penalty=2.5, length_penalty=1.0, early_stopping=True)

    preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True)
             for g in generated_ids]

    return preds[0]

summary = summarize(text, max_length=80)

readout = readout + " The following is a brief summary of it: " + summary

# Read the verdict aloud with a VITS text-to-speech model
from transformers import VitsModel

model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

inputs = tokenizer(readout, return_tensors="pt")

with torch.no_grad():
    output = model(**inputs).waveform

import scipy.io.wavfile

scipy.io.wavfile.write("readout.wav", rate=model.config.sampling_rate, data=output.float().numpy().T)

# Open the audio file
with open('readout.wav', 'rb') as audio_file:
    audio_bytes = audio_file.read()

# Play the audio with st.audio
st.audio(audio_bytes, format='audio/wav')

st.text("Your input is: " + text + " ; Our discussion: " + readout)
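A note on the loading pattern above: Streamlit re-executes app.py from top to bottom on every widget interaction, so all three models are re-instantiated on each rerun. A minimal sketch of how the loads could be cached with st.cache_resource so they run once per process (the helper names load_classifier, load_summarizer, and load_tts are illustrative, not part of the app above):

import streamlit as st
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          AutoModelForSeq2SeqLM, VitsModel)

@st.cache_resource  # cached across reruns and sessions
def load_classifier():
    tok = AutoTokenizer.from_pretrained("Shiiirley/fake_news_detector")
    mdl = AutoModelForSequenceClassification.from_pretrained("Shiiirley/fake_news_detector")
    return tok, mdl

@st.cache_resource
def load_summarizer():
    tok = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-summarize-news", use_fast=False)
    mdl = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-summarize-news")
    return tok, mdl

@st.cache_resource
def load_tts():
    tok = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")
    mdl = VitsModel.from_pretrained("facebook/mms-tts-eng")
    return tok, mdl

Each load in the app would then become, e.g., tokenizer, model = load_classifier(), and the weights stay in memory between interactions instead of being rebuilt on every rerun.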
requirements.txt
ADDED
@@ -0,0 +1,5 @@
streamlit
transformers
torch
sentencepiece
scipy
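These requirements are unpinned, so each build of the Space pulls the latest release of every package, and an API change in transformers or streamlit can break the app without any code change. If reproducibility matters, the file could pin versions; the pins below are an illustrative sketch (placeholder versions, not ones verified against this Space):

streamlit==1.32.0
transformers==4.38.2
torch==2.2.1
sentencepiece==0.1.99
scipy==1.12.0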