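"""MetaRefine: refine Hugging Face Hub model search results by metadata quality."""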
import asyncio
import copy
import json
import os
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
from functools import lru_cache
from json import JSONDecodeError
from typing import Any, Dict, List, Optional, Union
from urllib.parse import quote
import gradio as gr
import httpx
import orjson
from cachetools import TTLCache, cached
from cashews import NOT_NONE, cache
from dotenv import load_dotenv
from httpx import AsyncClient, Client
from huggingface_hub import hf_hub_url, logging
from huggingface_hub.utils import disable_progress_bars
from rich import print
from tqdm.auto import tqdm
load_dotenv() # take environment variables from .env.
CACHE_EXPIRY_TIME = timedelta(hours=3)
sync_cache = TTLCache(maxsize=200_000, ttl=CACHE_EXPIRY_TIME, timer=datetime.now)
cache.setup("mem://")
disable_progress_bars()
logging.set_verbosity_error()
if token := os.getenv("HF_TOKEN"):
headers = {"authorization": f"Bearer {token}"}
else:
    raise EnvironmentError(
        "No HF_TOKEN found. Set the HF_TOKEN environment variable to a Hugging Face API token."
    )
async def get_model_labels(model, client):
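    """Return the label names from a model's config.json, or None if unavailable."""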
try:
url = hf_hub_url(repo_id=model, filename="config.json")
resp = await client.get(url, timeout=2)
return list(resp.json()["label2id"].keys())
except (KeyError, JSONDecodeError, AttributeError):
return None
def get_model_labels_sync(model, client=None):
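    """Synchronous version of get_model_labels."""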
if not client:
client = Client(headers=headers)
try:
url = hf_hub_url(repo_id=model, filename="config.json")
resp = client.get(url, timeout=2)
return list(resp.json()["label2id"].keys())
except (KeyError, JSONDecodeError, AttributeError):
return None
async def _try_load_model_card(hub_id, client=None):
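    """Fetch a model's README.md and return (card_text, length); text is None if unavailable."""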
if not client:
client = AsyncClient(headers=headers)
try:
        url = hf_hub_url(
            repo_id=hub_id, filename="README.md"
        )  # Fetch the raw card directly rather than via the client library for speed
        resp = await client.get(url)
        if resp.status_code == 200:
            card_text = resp.text
            length = len(card_text)
        elif resp.status_code == 404:
            card_text = None
            length = 0
        else:  # any other status code: treat the card as unavailable
            card_text = None
            length = None
    except httpx.ConnectError:
        card_text = None
        length = None
return card_text, length
def _try_load_model_card_sync(hub_id, client=None):
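    """Synchronous version of _try_load_model_card."""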
if not client:
client = Client(headers=headers)
try:
        url = hf_hub_url(
            repo_id=hub_id, filename="README.md"
        )  # Fetch the raw card directly rather than via the client library for speed
        resp = client.get(url)
        if resp.status_code == 200:
            card_text = resp.text
            length = len(card_text)
        elif resp.status_code == 404:
            card_text = None
            length = 0
        else:  # any other status code: treat the card as unavailable
            card_text = None
            length = None
    except httpx.ConnectError:
        card_text = None
        length = None
return card_text, length
def _try_parse_card_data(hub_json_data):
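    """Extract license, language, and datasets from the Hub API payload's cardData."""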
    data = {}
    card_data = hub_json_data.get("cardData")
    for key in ("license", "language", "datasets"):
        try:
            data[key] = card_data.get(key) if card_data else None
        except AttributeError:  # cardData present but not a mapping
            data[key] = None
    return data
@dataclass(eq=False)
class ModelMetadata:
hub_id: str
tags: Optional[List[str]]
license: Optional[str]
library_name: Optional[str]
datasets: Optional[List[str]]
pipeline_tag: Optional[str]
labels: Optional[List[str]]
languages: Optional[Union[str, List[str]]]
model_card_text: Optional[str] = None
model_card_length: Optional[int] = None
likes: Optional[int] = None
downloads: Optional[int] = None
created_at: Optional[datetime] = None
@classmethod
@cache(ttl=CACHE_EXPIRY_TIME, condition=NOT_NONE)
async def from_hub(cls, hub_id, client=None):
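        """Build a ModelMetadata from the Hub API; results are cached for CACHE_EXPIRY_TIME."""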
try:
if not client:
                client = httpx.AsyncClient(headers=headers)  # send auth headers, matching the sync client
url = f"https://huggingface.co/api/models/{hub_id}"
resp = await client.get(url)
hub_json_data = resp.json()
card_text, length = await _try_load_model_card(hub_id)
data = _try_parse_card_data(hub_json_data)
library_name = hub_json_data.get("library_name")
pipeline_tag = hub_json_data.get("pipeline_tag")
downloads = hub_json_data.get("downloads")
likes = hub_json_data.get("likes")
tags = hub_json_data.get("tags")
labels = await get_model_labels(hub_id, client)
            return cls(
hub_id=hub_id,
languages=data["language"],
tags=tags,
license=data["license"],
library_name=library_name,
datasets=data["datasets"],
pipeline_tag=pipeline_tag,
labels=labels,
model_card_text=card_text,
downloads=downloads,
likes=likes,
model_card_length=length,
)
except Exception as e:
print(f"Failed to create ModelMetadata for model {hub_id}: {str(e)}")
return None
@dataclass(eq=False)
class ModelMetadataSync(ModelMetadata):
    """Synchronous variant of ModelMetadata, sharing the same fields."""
@classmethod
def from_hub(cls, hub_id, client=None):
try:
if not client:
client = httpx.Client(headers=headers)
url = f"https://huggingface.co/api/models/{hub_id}"
resp = client.get(url)
hub_json_data = resp.json()
card_text, length = _try_load_model_card_sync(hub_id)
data = _try_parse_card_data(hub_json_data)
library_name = hub_json_data.get("library_name")
pipeline_tag = hub_json_data.get("pipeline_tag")
downloads = hub_json_data.get("downloads")
likes = hub_json_data.get("likes")
tags = hub_json_data.get("tags")
labels = get_model_labels_sync(hub_id, client)
            return cls(
hub_id=hub_id,
languages=data["language"],
tags=tags,
license=data["license"],
library_name=library_name,
datasets=data["datasets"],
pipeline_tag=pipeline_tag,
labels=labels,
model_card_text=card_text,
downloads=downloads,
likes=likes,
model_card_length=length,
)
except Exception as e:
print(f"Failed to create ModelMetadata for model {hub_id}: {str(e)}")
return None
COMMON_SCORES = {
"license": {
"required": True,
"score": 2,
"missing_recommendation": (
"You have not added a license to your models metadata"
),
},
"datasets": {
"required": False,
"score": 1,
"missing_recommendation": (
"You have not added any datasets to your models metadata"
),
},
"model_card_text": {
"required": True,
"score": 3,
"missing_recommendation": """You haven't created a model card for your model. It is strongly recommended to have a model card for your model. \nYou can create for your model by clicking [here](https://huggingface.co/HUB_ID/edit/main/README.md)""",
},
"tags": {
"required": False,
"score": 2,
"missing_recommendation": (
"You don't have any tags defined in your model metadata. Tags can help"
" people find relevant models on the Hub. You can create for your model by"
" clicking [here](https://huggingface.co/HUB_ID/edit/main/README.md)"
),
},
}
TASK_TYPES_WITH_LANGUAGES = {
"text-classification",
"token-classification",
"table-question-answering",
"question-answering",
"zero-shot-classification",
"translation",
"summarization",
"text-generation",
"text2text-generation",
"fill-mask",
"sentence-similarity",
"text-to-speech",
"automatic-speech-recognition",
"text-to-image",
"image-to-text",
"visual-question-answering",
"document-question-answering",
}
LABELS_REQUIRED_TASKS = {
"text-classification",
"token-classification",
"object-detection",
"audio-classification",
"image-classification",
"tabular-classification",
}
ALL_PIPELINES = {
"audio-classification",
"audio-to-audio",
"automatic-speech-recognition",
"conversational",
"depth-estimation",
"document-question-answering",
"feature-extraction",
"fill-mask",
"graph-ml",
"image-classification",
"image-segmentation",
"image-to-image",
"image-to-text",
"object-detection",
"question-answering",
"reinforcement-learning",
"robotics",
"sentence-similarity",
"summarization",
"table-question-answering",
"tabular-classification",
"tabular-regression",
"text-classification",
"text-generation",
"text-to-image",
"text-to-speech",
"text-to-video",
"text2text-generation",
"token-classification",
"translation",
"unconditional-image-generation",
"video-classification",
"visual-question-answering",
"voice-activity-detection",
"zero-shot-classification",
"zero-shot-image-classification",
}
formatted_scores = "\n"
for k, v in COMMON_SCORES.items():
    formatted_scores += f"{k}: {v}\n"
@lru_cache()
def generate_task_scores_dict():
task_scores = {}
for task in ALL_PIPELINES:
task_dict = copy.deepcopy(COMMON_SCORES)
if task in TASK_TYPES_WITH_LANGUAGES:
task_dict = {
**task_dict,
**{
"languages": {
"required": True,
"score": 2,
"missing_recommendation": (
"You haven't defined any languages in your metadata. This"
f" is usually recommend for {task} task"
),
}
},
}
if task in LABELS_REQUIRED_TASKS:
task_dict = {
**task_dict,
**{
"labels": {
"required": True,
"score": 2,
"missing_recommendation": (
"You haven't defined any labels in the config.json file"
f" these are usually recommended for {task}"
),
}
},
}
max_score = sum(value["score"] for value in task_dict.values())
task_dict["_max_score"] = max_score
task_scores[task] = task_dict
return task_scores
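# For example, "text-classification" appears in both TASK_TYPES_WITH_LANGUAGES and
# LABELS_REQUIRED_TASKS, so its scoring dictionary holds the four common fields
# (2 + 1 + 3 + 2) plus "languages" (2) and "labels" (2), giving a _max_score of 12.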
@lru_cache()
def generate_common_scores():
GENERIC_SCORES = copy.deepcopy(COMMON_SCORES)
GENERIC_SCORES["_max_score"] = sum(
value["score"] for value in GENERIC_SCORES.values()
)
return GENERIC_SCORES
SCORES = generate_task_scores_dict()
GENERIC_SCORES = generate_common_scores()
@cached(sync_cache)
def _basic_check(data: Optional[ModelMetadata]):
score = 0
if data is None:
return None
hub_id = data.hub_id
to_fix = {}
if task := data.pipeline_tag:
task_scores = SCORES[task]
data_dict = asdict(data)
for k, v in task_scores.items():
if k.startswith("_"):
continue
if data_dict[k] is None:
to_fix[k] = task_scores[k]["missing_recommendation"].replace(
"HUB_ID", hub_id
)
if data_dict[k] is not None:
score += v["score"]
max_score = task_scores["_max_score"]
score = score / max_score
        summary = (
            f"Your model's metadata score is {round(score * 100)}% based on suggested"
            f" metadata for {task}. \n"
        )
        if to_fix:
            recommendations = summary + (
                "Here are some suggestions to improve your model's metadata for"
                f" {task}: \n"
            )
            for v in to_fix.values():
                recommendations += f"\n- {v}"
data_dict["recommendations"] = recommendations
data_dict["score"] = score * 100
else:
data_dict = asdict(data)
for k, v in GENERIC_SCORES.items():
if k.startswith("_"):
continue
if data_dict[k] is None:
to_fix[k] = GENERIC_SCORES[k]["missing_recommendation"].replace(
"HUB_ID", hub_id
)
if data_dict[k] is not None:
score += v["score"]
        score = score / GENERIC_SCORES["_max_score"]
        if to_fix:
            recommendations = (
                "Here are some suggestions to improve your model's metadata: \n"
            )
            for v in to_fix.values():
                recommendations += f"\n- {v}"
            data_dict["recommendations"] = recommendations
        data_dict["score"] = max(
            0, (score / 2) * 100
        )  # TODO currently setting a manual penalty for not having a task
return orjson.dumps(data_dict)
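# Worked example using the weights above: a text-classification model with a
# license (2) and a model card (3) but no datasets, tags, languages, or labels
# scores 5 / 12, i.e. a score of roughly 42%.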
def basic_check(data: Optional[ModelMetadata]) -> Optional[bytes]:
    return _basic_check(data)
@cached(sync_cache)
def basic_check_from_hub_id(hub_id):
    model_data = ModelMetadataSync.from_hub(hub_id)
    checked = basic_check(model_data)
    if checked is None:
        return {"error": f"Could not load metadata for {hub_id}"}
    return orjson.loads(checked)
def create_query_url(query, skip=0):
    """Build a full-text search URL for the Hub API; the query is URL-encoded."""
    return f"https://huggingface.co/api/search/full-text?q={quote(query)}&limit=100&skip={skip}&type=model"
def get_results(query, sync_client=None) -> Dict[Any, Any]:
if not sync_client:
sync_client = Client(http2=True, headers=headers)
url = create_query_url(query)
r = sync_client.get(url)
return r.json()
def parse_single_result(result):
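    """Extract the repo name, matched-file URL, and repo page URL from one search hit."""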
name, filename = result["name"], result["fileName"]
search_result_file_url = hf_hub_url(name, filename)
repo_hub_url = f"https://huggingface.co/{name}"
return {
"name": name,
"search_result_file_url": search_result_file_url,
"repo_hub_url": repo_hub_url,
}
@cache(ttl=CACHE_EXPIRY_TIME, condition=NOT_NONE)
async def get_hub_models(results, client=None):
parsed_results = [parse_single_result(result) for result in results]
if not client:
client = AsyncClient(http2=True, headers=headers)
model_ids = [result["name"] for result in parsed_results]
model_objs = [ModelMetadata.from_hub(model, client=client) for model in model_ids]
models = await asyncio.gather(*model_objs)
results = []
for result, model in zip(parsed_results, models):
score = _basic_check(model)
# print(f"score for {model} is {score}")
if score is not None:
score = orjson.loads(score)
result["metadata_score"] = score["score"]
result["model_card_length"] = score["model_card_length"]
result["is_licensed"] = (bool(score["license"]),)
results.append(result)
else:
results.append(None)
return results
def filter_for_license(results):
for result in results:
if result["is_licensed"]:
yield result
def filter_for_min_model_card_length(results, min_model_card_length):
    for result in results:
        length = result["model_card_length"]
        if length is not None and length > min_model_card_length:
            yield result
def filter_search_results(
    results: List[Dict[Any, Any]],
    min_score=None,
    min_model_card_length=None,
):
    results = asyncio.run(get_hub_models(results))
    for i, parsed_result in tqdm(enumerate(results)):
        if parsed_result is None:
            continue
        # A result must pass every active filter to be yielded.
        if min_score is not None and parsed_result["metadata_score"] <= min_score:
            continue
        if min_model_card_length is not None:
            length = parsed_result["model_card_length"]
            if length is None or length <= min_model_card_length:
                continue
        parsed_result["original_position"] = i
        yield parsed_result
def sort_search_results(
filtered_search_results,
first_sort_key="metadata_score",
second_sort_key="original_position", # TODO expose these in results
):
return sorted(
list(filtered_search_results),
key=lambda x: (x[first_sort_key], x[second_sort_key]),
reverse=True,
)
def find_context(text, query, window_size):
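    """Return up to `window_size` words of context around the first occurrence of `query`."""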
# Split the text into words
words = text.split()
# Find the index of the query token
try:
index = words.index(query)
# Get the start and end indices of the context window
start = max(0, index - window_size)
end = min(len(words), index + window_size + 1)
return " ".join(words[start:end])
except ValueError:
return " ".join(words[:window_size])
def create_markdown(results): # TODO move to separate file
rows = []
for result in results:
row = f"""# [{result['name']}]({result['repo_hub_url']})
| Metadata Quality Score | Model card length | Licensed |
|------------------------|-------------------|----------|
| {result['metadata_score']:.0f}% | {result['model_card_length']} | {"&#9989;" if result['is_licensed'] else "&#10060;"} |
\n
*{result['text']}*
<hr>
\n"""
rows.append(row)
return "\n".join(rows)
async def get_result_card_snippet(result, query=None, client=None):
if not client:
client = AsyncClient(http2=True, headers=headers)
try:
resp = await client.get(result["search_result_file_url"])
result_text = resp.text
result["text"] = find_context(result_text, query, 100)
except httpx.ConnectError:
result["text"] = "Could not load model card"
return result
@cache(ttl=CACHE_EXPIRY_TIME, condition=NOT_NONE)
async def get_result_card_snippets(results, query=None, client=None):
if not client:
client = AsyncClient(http2=True, headers=headers)
result_snippets = [
get_result_card_snippet(result, query=query, client=client)
for result in results
]
results = await asyncio.gather(*result_snippets)
return results
sync_client = Client(http2=True, headers=headers)
def _search_hub(
query: str,
min_score: Optional[int] = None,
min_model_card_length: Optional[int] = None,
):
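    """Search the Hub, score and filter the hits, then render the results as markdown."""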
    results = get_results(query, sync_client)
    print(f"Found {len(results['hits'])} results")
    results = results["hits"]
    number_original_results = len(results)
    if not number_original_results:
        return "No results found for this query.", ""
filtered_results = filter_search_results(
results, min_score=min_score, min_model_card_length=min_model_card_length
)
filtered_results = sort_search_results(filtered_results)
final_results = asyncio.run(get_result_card_snippets(filtered_results, query=query))
percent_of_original = round(
len(final_results) / number_original_results * 100, ndigits=0
)
filtered_vs_og = f"""
| Number of original results | Number of results after filtering | Percentage of results after filtering |
| -------------------------- | --------------------------------- | -------------------------------------------- |
| {number_original_results} | {len(final_results)} | {percent_of_original}% |
"""
return filtered_vs_og, create_markdown(final_results)
def search_hub(query: str, min_score=None, min_model_card_length=None):
return _search_hub(query, min_score, min_model_card_length)
with gr.Blocks() as demo:
with gr.Tab("Search"):
gr.HTML(
"""
<h1 style="text-align: center;"> &#128269; MetaRefine &#128269; </h1>
<p style="text-align: center;">&#x2728; <em> Refine Hub model search results by metadata quality.</em> &#x2728;</p>
"""
)
gr.Markdown(
"""This app enables you to perform full-text searches on the Hugging Face Hub for machine learning models.
You can search by keyword or phrase and filter results by metadata quality.
Optionally, you can set a minimum model card length or metadata quality score to refine your results.
Models are ranked based on metadata quality, with higher scores receiving priority.
In case of equal scores, the original search order determines the ranking.
More filtering and sorting options may be added based on user interest!
If you have feedback, please [open an issue](https://huggingface.co/spaces/librarian-bots/MetaRefine/discussions/new) in the community tab!
"""
)
with gr.Row():
with gr.Column():
query = gr.Textbox("historic", label="Search query")
with gr.Column():
button = gr.Button("Search")
with gr.Row():
# literal_search = gr.Checkbox(False, label="Literal_search")
# TODO add option for exact matching i.e. phrase matching
# gr.Checkbox(False, label="Must have license?")
            min_model_card_length = gr.Number(
                100,
                label="Minimum model card length (words)",
            )
min_metadata_score = gr.Slider(
0, 100, 50, label="Minimum metadata score (%)"
)
# gr.Markdown("## Search results")
filter_results = gr.Markdown()
results_markdown = gr.Markdown()
        button.click(
            search_hub,
            [query, min_metadata_score, min_model_card_length],
            [filter_results, results_markdown],
        )
    with gr.Tab("Metadata quality details"):
with gr.Row():
gr.Markdown(
"""# How metadata quality is scored?
The current approach to metadata scoring is based on checking if a particular piece of metadata is present or not i.e. is a dataset specified in the mode's metadata or not?
For each metadata field a score between 1 and 3 is given if that feature is present or not. These scores are based on the relative importance of the metadata field.
We do this on a task specific basis for models where a `pipeline_tag` exists.
For each task the scores achieved are compared to the maximum possible score for that field."""
)
with gr.Row():
gr.Markdown(
"""
### Common Scores
We start with some 'common scores'. These common scores are for fields that should be present for any model, i.e. they are not specific to a particular task."""
)
with gr.Accordion(label="Common scores dictionary"):
gr.JSON(json.dumps(COMMON_SCORES))
with gr.Row():
gr.Markdown(
"""# Task specific scoring.
We also define task specific scores for the following model task types. This allows are scoring to reflect the fact that different tasks have different metadata requirements. For example, the following set includes all tasks for which a language should be specified."""
)
with gr.Row():
            # sorted for a stable display order
            markdown_formatted_languages = "".join(
                f"- {task}\n" for task in sorted(TASK_TYPES_WITH_LANGUAGES)
            )
gr.Markdown(markdown_formatted_languages)
with gr.Row():
gr.Markdown(
"""#### Text classification example
Below you can see the example scoring dictionary for text-classification models."""
)
with gr.Accordion(label="Text classification dictionary"):
text_class_scores_example = SCORES["text-classification"]
            gr.JSON(json.dumps(text_class_scores_example))
with gr.Accordion(label="Full overview of all scores", open=False):
            gr.JSON(json.dumps(SCORES))
with gr.Tab("Score models"):
model_id_to_score = gr.Textbox(
placeholder="bert-base-uncased", label="Model ID"
)
        score_model = gr.Button("Score model")
        score_output = gr.JSON()
        score_model.click(basic_check_from_hub_id, model_id_to_score, score_output)
demo.launch()