from transformers import DistilBertForSequenceClassification, DistilBertTokenizer
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.parse import urlparse
import torch.nn.functional as F
import gradio as gr
import torch
import unidecode
import unicodedata
import string
import json
import re
def check_by_url(txt_url):
    # Reduce the URL to its parent directory path.
    parsed_url = urlparse(txt_url)
    url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path.rsplit('/', 1)[0]}/"
    print(url)

    # Fetch and parse the page.
    page = urlopen(url=url).read().decode("utf-8")
    soup = BeautifulSoup(page, "html.parser")
    title = soup.find("title").get_text()

    # Remove punctuation from the title.
    def remove_punctuation(text):
        return "".join(ch for ch in text if ch not in string.punctuation)

    title = remove_punctuation(title)
    css_class_to_remove = "dp-highlighter"  # CSS class whose <div> content is dropped

    # Clear code blocks and syntax-highlighted sections before extracting text.
    for tag in soup.find_all(["code", "pre"]):
        tag.clear()
    for div_tag in soup.find_all("div", class_=css_class_to_remove):
        div_tag.clear()
    # Fetch content of <p> tags that carry an inline style attribute.
    content_with_style = ""
    for p_tag in soup.find_all("p", style=True):
        content_with_style += re.sub(r"\n", "", p_tag.get_text())

    # Fetch content of <p> tags without an inline style attribute.
    content_without_style = ""
    for p_tag in soup.find_all("p", style=False):
        content_without_style += re.sub(r"\n", "", p_tag.get_text())
    # Normalize both text blocks: collapse extra spaces, drop '\r' characters,
    # decompose Unicode (NFKD), and transliterate to ASCII.
    def normalize(text):
        text = re.sub(r"\s+", " ", text)
        text = text.replace("\r", "")
        text = unicodedata.normalize("NFKD", text)
        return unidecode.unidecode(text)

    normalized_content_with_style = normalize(content_with_style)
    normalized_content_with_style += normalize(content_without_style)

    new_data = {"title": title, "content": normalized_content_with_style}
    model = DistilBertForSequenceClassification.from_pretrained(".")
    tokenizer = DistilBertTokenizer.from_pretrained(".")
    label_mapping = {1: "SFW", 0: "NSFW"}

    test_encodings = tokenizer.encode_plus(
        title,
        truncation=True,
        padding=True,
        max_length=512,
        return_tensors="pt",
    )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test_input_ids = test_encodings["input_ids"].to(device)
    test_attention_mask = test_encodings["attention_mask"].to(device)
    with torch.no_grad():
        model = model.to(device)
        model.eval()
        outputs = model(test_input_ids, attention_mask=test_attention_mask)
        logits = outputs.logits
        predicted_labels = torch.argmax(logits, dim=1)
        probabilities = F.softmax(logits, dim=1)
        confidence_score_title = torch.max(probabilities, dim=1).values.tolist()
        predicted_label_title = label_mapping[predicted_labels.item()]
    test_encodings = tokenizer.encode_plus(
        normalized_content_with_style,
        truncation=True,
        padding=True,
        max_length=512,
        return_tensors="pt",
    )
    test_input_ids = test_encodings["input_ids"].to(device)
    test_attention_mask = test_encodings["attention_mask"].to(device)
    with torch.no_grad():
        outputs = model(test_input_ids, attention_mask=test_attention_mask)
        logits = outputs.logits
        predicted_labels = torch.argmax(logits, dim=1)
        probabilities = F.softmax(logits, dim=1)
        confidence_scores_content = torch.max(probabilities, dim=1).values.tolist()
        predicted_label_content = label_mapping[predicted_labels.item()]

    return (
        predicted_label_title,
        confidence_score_title,
        predicted_label_content,
        confidence_scores_content,
        new_data,
    )
label_mapping = {1: "SFW", 0: "NSFW"}  # module-level mapping: 1 -> SFW, 0 -> NSFW
def predict_2(txt_url, normalized_content_with_style):
    (
        predicted_label_title,
        confidence_score_title,
        predicted_label_content,
        confidence_scores_content,
        new_data,
    ) = (None, None, None, None, None)
    predicted_label_text, confidence_score_text = None, None

    if txt_url.startswith(("http://", "https://")):
        # URL input: scrape the page and classify its title and content.
        (
            predicted_label_title,
            confidence_score_title,
            predicted_label_content,
            confidence_scores_content,
            new_data,
        ) = check_by_url(txt_url)
    else:
        # Plain-text input: classify the text directly.
        model = DistilBertForSequenceClassification.from_pretrained(".")
        tokenizer = DistilBertTokenizer.from_pretrained(".")
        test_encodings = tokenizer.encode_plus(
            normalized_content_with_style,
            truncation=True,
            padding=True,
            max_length=512,
            return_tensors="pt",
        )
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        test_input_ids = test_encodings["input_ids"].to(device)
        test_attention_mask = test_encodings["attention_mask"].to(device)
        with torch.no_grad():
            model = model.to(device)
            model.eval()
            outputs = model(test_input_ids, attention_mask=test_attention_mask)
            logits = outputs.logits
            predicted_labels = torch.argmax(logits, dim=1)
            probabilities = F.softmax(logits, dim=1)
            confidence_score_text = torch.max(probabilities, dim=1).values.tolist()
            predicted_label_text = label_mapping[predicted_labels.item()]

    return (
        predicted_label_title,
        confidence_score_title,
        predicted_label_content,
        confidence_scores_content,
        new_data,
        predicted_label_text,
        confidence_score_text,
    )
def word_by_word(txt_url, normalized_content_with_style):
    # Run the document-level prediction first (URL or raw text).
    (
        predicted_label_title,
        confidence_score_title,
        predicted_label_content,
        confidence_scores_content,
        new_data,
        predicted_label_text,
        confidence_score_text,
    ) = predict_2(txt_url, normalized_content_with_style)

    model = DistilBertForSequenceClassification.from_pretrained(".")
    tokenizer = DistilBertTokenizer.from_pretrained(".")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

    new_word = {}
    content_words = []
    words_2 = []
    # Drill down to word level only when the document-level label is NSFW.
    if predicted_label_content == "NSFW" or predicted_label_text == "NSFW":
        if txt_url.startswith(("http://", "https://")):
            content_words = new_data["content"].split()
        else:
            words_2 = normalized_content_with_style.split()

        results = []
        # Exactly one of the two word lists is non-empty.
        for word in content_words or words_2:
            encoding = tokenizer.encode_plus(
                word,
                truncation=True,
                padding=True,
                max_length=512,
                return_tensors="pt",
            )
            input_ids = encoding["input_ids"].to(device)
            attention_mask = encoding["attention_mask"].to(device)
            with torch.no_grad():
                outputs = model(input_ids, attention_mask=attention_mask)
                logits = outputs.logits
                probabilities = F.softmax(logits, dim=1)
                predicted_label = torch.argmax(logits, dim=1).item()
            predicted_label_word = label_mapping[predicted_label]
            confidence_score_word = torch.max(probabilities, dim=1).values.item()
            # Report only the words flagged as NSFW.
            if predicted_label_word == "NSFW":
                results.append(
                    {"Word": word, "Label": predicted_label_word, "Confidence": confidence_score_word}
                )
        new_word = json.dumps(results)

    return (
        predicted_label_title,
        confidence_score_title,
        predicted_label_content,
        confidence_scores_content,
        new_data,
        predicted_label_text,
        confidence_score_text,
        new_word,
    )
demo = gr.Interface(
    fn=word_by_word,
    inputs=[
        gr.Textbox(label="URL", placeholder="Enter URL"),
        gr.Textbox(label="Text", placeholder="Enter Text"),
    ],
    outputs=[
        gr.Textbox(label="Title_prediction"),
        gr.Textbox(label="Title_confidence_score"),
        gr.Textbox(label="Content_prediction"),
        gr.Textbox(label="Content_confidence_score"),
        gr.Textbox(label="Description", show_copy_button=True),
        gr.Textbox(label="Text_prediction_score"),
        gr.Textbox(label="Text_confidence_score"),
        gr.Textbox(label="per word classification", show_copy_button=True),
    ],
)
demo.launch()
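
# A minimal sketch of exercising the pipeline without the Gradio UI
# (assumes the fine-tuned DistilBERT checkpoint files sit in the working
# directory, as the from_pretrained(".") calls above require; the URL
# below is a placeholder). Either argument may be left empty, mirroring
# the two Gradio inputs:
#
#   word_by_word("https://example.com/blog/post", "")
#   word_by_word("", "Some raw text to classify")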