# Spaces: Running on Zero  (Hugging Face Spaces status banner — page-scrape residue, not code)
# Standard library
import json
import re
import time
from urllib.parse import parse_qs, urlparse

# Third-party
import feedparser
import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Classifier fine-tuned to predict whether AK selects a paper for HF papers.
model_name = 'yuntian-deng/ak-paper-selection-deberta'
max_length = 512  # token budget used for truncation at inference time

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only — disable dropout etc.
if torch.cuda.is_available():
    model.cuda()

# Calibration table computed on held-out validation data: for each candidate
# score cut-off, the precision/recall of "AK selects it" above that cut-off.
# `with` ensures the file handle is closed (the original leaked it).
with open('validation_results.json') as f:
    validation_results = json.load(f)
scores = validation_results['scores']
thresholds = validation_results['thresholds']
precisions = validation_results['precisions']
recalls = validation_results['recalls']
def get_threshold_precision(score_, table=None):
    """Look up the calibration row bracketing `score_`.

    Walks the (score, threshold, precision, recall) rows in ascending score
    order and returns (threshold, precision, recall) of the last row whose
    score is <= `score_`. When the row's threshold equals its score, the
    caller's exact score is reported as the threshold instead.

    Args:
        score_: predicted selection probability in [0, 1].
        table: optional iterable of (score, threshold, precision, recall)
            rows; defaults to the module-level validation calibration table.

    Returns:
        (threshold, precision, recall) tuple.
    """
    rows = list(table) if table is not None else list(zip(scores, thresholds, precisions, recalls))
    # Seed with the first row so a score below every calibration score
    # falls back to it (the original raised NameError in that case).
    prev_score, prev_threshold, prev_precision, prev_recall = rows[0]
    for score, threshold, precision, recall in rows:
        if score_ < score:
            break
        prev_score, prev_threshold, prev_precision, prev_recall = score, threshold, precision, recall
    if prev_threshold == prev_score:
        prev_threshold = score_
    return prev_threshold, prev_precision, prev_recall
def extract_arxiv_id(input_text):
    """Extract a bare arXiv identifier from a URL, or pass the input through.

    Handles API query URLs (`?id_list=...`) and abs/pdf page URLs; anything
    else (e.g. an already-bare ID) is returned unchanged apart from trimming.
    """
    input_text = input_text.strip()
    if 'arxiv.org' in input_text:
        parsed_url = urlparse(input_text)
        query = parse_qs(parsed_url.query)
        if 'id_list' in query:
            return query['id_list'][0]
        # Drop empty segments so leading/trailing slashes can't shift
        # the abs/pdf position (and can't IndexError on short paths).
        path_parts = [part for part in parsed_url.path.split('/') if part]
        if len(path_parts) >= 2 and path_parts[-2] in ('abs', 'pdf'):
            return path_parts[-1].replace('.pdf', '')
    return input_text
def fetch_arxiv_data(arxiv_id):
    """Fetch (title, authors, abstract) for an arXiv ID via the export API.

    Returns ("", "", "") when the feed has no entries (unknown ID, network
    problem swallowed by feedparser, etc.).
    """
    time.sleep(3)  # Comply with arXiv API terms of usage
    query_url = f'http://export.arxiv.org/api/query?id_list={arxiv_id}'
    response = feedparser.parse(query_url)
    if response.entries:
        entry = response.entries[0]
        # .get guards against entries missing optional fields.
        title = entry.get('title', '')
        authors = ', '.join(author.get('name', '') for author in entry.get('authors', []))
        abstract = entry.get('summary', '')
        return title, authors, abstract
    return "", "", ""
def update_fields(url_or_id):
    """Resolve a user-supplied arXiv URL/ID and return (title, authors, abstract)."""
    return fetch_arxiv_data(extract_arxiv_id(url_or_id))
def normalize_spaces(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return ' '.join(text.split())
def fill_template(title, authors, abstract):
    """Render the paper metadata into the three-line prompt the model expects."""
    clean_title = normalize_spaces(title.replace('\n', ' '))
    clean_authors = ', '.join(name.strip() for name in authors.split(','))
    clean_abstract = normalize_spaces(abstract.replace('\n', ' '))
    return (
        f"Title: {clean_title}\n"
        f"Authors: {clean_authors}\n"
        f"Abstract: {clean_abstract}"
    )
def model_inference(title, authors, abstract):
    """Score the paper with the classifier.

    Returns the positive-class probability (P(selected)) as a float in [0, 1].
    """
    text = fill_template(title, authors, abstract)
    print(text)  # log the exact model input for debugging
    inputs = tokenizer([text], return_tensors="pt", truncation=True, max_length=max_length)
    if torch.cuda.is_available():
        inputs = {key: value.cuda() for key, value in inputs.items()}
    # no_grad: inference only — skip building the autograd graph.
    with torch.no_grad():
        outputs = model(**inputs)
    probs = outputs.logits.softmax(dim=-1).view(-1)
    # Index 1 is the positive ("selected") class.
    return probs[1].item()
def predict(title, authors, abstract):
    """Score the paper and format a human-readable calibration summary.

    Returns (score, result) where `score` is P(selected) and `result`
    contextualizes it with the validation precision/recall at the matching
    threshold.
    """
    score = model_inference(title, authors, abstract)
    threshold, precision, recall = get_threshold_precision(score)
    result = (
        f"Your score: {score:.2f}.\n"
        f"For papers with score>={threshold:.2f}, {precision * 100:.2f}% are selected by AK.\n"
        f"For papers selected by AK, {recall * 100:.2f}% have score>={threshold:.2f}"
    )
    return score, result
# Example paper (WildChat) used to pre-populate the demo's input fields.
example_title = "WildChat: 1M ChatGPT Interaction Logs in the Wild"
example_authors = "Wenting Zhao, Xiang Ren, Jack Hessel, Claire Cardie, Yejin Choi, Yuntian Deng"
example_abstract = "Chatbots such as GPT-4 and ChatGPT are now serving millions of users. Despite their widespread use, there remains a lack of public datasets showcasing how these tools are used by a population of users in practice. To bridge this gap, we offered free access to ChatGPT for online users in exchange for their affirmative, consensual opt-in to anonymously collect their chat transcripts and request headers. From this, we compiled WildChat, a corpus of 1 million user-ChatGPT conversations, which consists of over 2.5 million interaction turns. We compare WildChat with other popular user-chatbot interaction datasets, and find that our dataset offers the most diverse user prompts, contains the largest number of languages, and presents the richest variety of potentially toxic use-cases for researchers to study. In addition to timestamped chat transcripts, we enrich the dataset with demographic data, including state, country, and hashed IP addresses, alongside request headers. This augmentation allows for more detailed analysis of user behaviors across different geographical regions and temporal dimensions. Finally, because it captures a broad range of use cases, we demonstrate the dataset’s potential utility in fine-tuning instruction-following models. WildChat is released at https://wildchat.allen.ai under AI2 ImpACT Licenses."
# gr.Textbox has no `on_change`/`change_elements` kwargs; Blocks is the
# supported way to wire the arXiv autofill field to the metadata inputs.
with gr.Blocks(title="Paper Selection Prediction") as iface:
    gr.Markdown(
        "# Paper Selection Prediction\n"
        "Predict if @_akhaliq will select your paper into Hugging Face papers. "
        "Enter the title, authors, and abstract of your paper, or enter an arXiv URL/ID."
    )
    arxiv_box = gr.Textbox(
        label="[Optional] Autofill using arXiv URL/ID",
        placeholder="[Optional] Autofill using arXiv URL/ID",
    )
    title_box = gr.Textbox(label="Paper Title", placeholder="Enter paper title", value=example_title)
    authors_box = gr.Textbox(
        label="Authors (separated by comma)",
        placeholder="Enter authors (separated by comma)",
        value=example_authors,
    )
    abstract_box = gr.TextArea(label="Abstract", placeholder="Enter abstract", value=example_abstract)
    submit_btn = gr.Button("Submit")
    score_box = gr.Textbox(label="Predicted Score")
    result_box = gr.Textbox(label="Predicted Selection Probability")

    # Autofill title/authors/abstract whenever the arXiv field changes.
    arxiv_box.change(
        update_fields,
        inputs=arxiv_box,
        outputs=[title_box, authors_box, abstract_box],
    )
    submit_btn.click(
        predict,
        inputs=[title_box, authors_box, abstract_box],
        outputs=[score_box, result_box],
    )

# Gradio 4: per-event limits default via `default_concurrency_limit`;
# `launch(enable_queue=...)` was removed — calling queue() enables it.
iface.queue(default_concurrency_limit=1, max_size=20).launch()