File size: 3,076 Bytes
376870b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import streamlit as st

# NOTE: max_chars limits *characters*, not words — help text now says so.
text = st.text_input('Your news is: ',  max_chars=200, help='maximum 200 characters')

# Load model directly
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import numpy as np


@st.cache_resource
def _load_fake_news_classifier():
    """Load the fake-news tokenizer/model once per server process.

    Streamlit re-executes this script on every user interaction; without
    caching, the model would be reloaded from disk/hub on each rerun.
    """
    tok = AutoTokenizer.from_pretrained("Shiiirley/fake_news_detector")
    mdl = AutoModelForSequenceClassification.from_pretrained("Shiiirley/fake_news_detector")
    return tok, mdl


tokenizer, model = _load_fake_news_classifier()

# Step 1: classify the user's news text with the fake-news model.

inputs = tokenizer(text,
                   padding = True,
                   truncation = True,
                   return_tensors='pt')

# Inference only — no_grad avoids building the autograd graph
# (consistent with the TTS inference further below).
with torch.no_grad():
    outputs = model(**inputs)

# Softmax over the two logits -> class probabilities, shape (1, 2).
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
predictions = predictions.cpu().detach().numpy()
# Debug output to the server console (not shown in the Streamlit UI).
print(f"The predicted class is {np.argmax(predictions)}")
print(f"Class 0={predictions[0,0]:.4f}, Class 1={predictions[0,1]:.4f}")

# Column 0 is used as the "fake" probability below; anything at or above
# a 40% fake probability is flagged as fake.
judge = "real"
if predictions[0,0] >= 0.4:
  judge = "fake"

readout = "This news is probably a "+ judge + f" one. The fake probability is {100*predictions[0,0]:.4f}%."

# AutoModelWithLMHead is deprecated (removed in recent transformers);
# AutoModelForSeq2SeqLM is the current class for T5-style seq2seq models.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# NOTE: this deliberately rebinds `tokenizer`/`model` from the classifier
# above — the classifier's readout has already been computed at this point.
tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-summarize-news",use_fast=False)
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-summarize-news")

def summarize(text, max_length=150):
  """Summarize *text* with the module-level T5 news-summarization model.

  Encodes the text, beam-searches (2 beams) up to *max_length* tokens,
  and returns the decoded summary string.
  """
  token_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)

  beam_output = model.generate(
      input_ids=token_ids,
      num_beams=2,
      max_length=max_length,
      repetition_penalty=2.5,
      length_penalty=1.0,
      early_stopping=True,
  )

  # Decode only the best (first) beam — equivalent to decoding all and taking [0].
  return tokenizer.decode(beam_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)

# Append a short (<= 80 token) summary of the input to the spoken readout.
summary = summarize(text, max_length=80)

readout = f"{readout} The following is a brief summary of it: {summary}"

from transformers import VitsModel, AutoTokenizer
import torch

# Text-to-speech: synthesize the readout sentence with MMS English TTS.
model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

inputs = tokenizer(readout, return_tensors="pt")

with torch.no_grad():
    output = model(**inputs).waveform

# BUG FIX: a bare `import scipy` does not guarantee the `scipy.io.wavfile`
# submodule is loaded — import it explicitly before use.
import scipy.io.wavfile

# waveform is (1, n_samples); transpose to (n_samples, 1) for the WAV writer.
scipy.io.wavfile.write("readout.wav", rate=model.config.sampling_rate, data=output.float().numpy().T)

# Read the synthesized audio back; the context manager closes the file
# handle (the original opened it and never closed it).
with open('readout.wav', 'rb') as audio_file:
    audio_bytes = audio_file.read()

# Play the audio with st.audio.
st.audio(audio_bytes, format='audio/wav')

st.text("Your input is: "+text+" ; Our discussion: "+readout)