# Copyright 2022 Ken Kawamura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlit front-end for ranking answer choices with Hugging Face models.

The user types a question and a comma-separated list of answer choices,
picks one of the supported hub models, and ``multi_inference_rank_eval``
ranks the choices; the top-ranked choice is displayed as the result.
"""
import os
import sys
from typing import Dict

import requests
import streamlit as st

# Make the repository root importable so the ``app...`` import below
# resolves when Streamlit launches this script directly.
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))

from app.evaluation_scripts.run_eval import multi_inference_rank_eval

st.set_page_config(layout="wide")

# NOTE(review): the HTML/CSS markup originally embedded in the markdown
# strings in this file was lost during extraction; only the visible text
# survives here. Restore the styled markup from version control.
st.markdown('Submit your question here', unsafe_allow_html=True)

st.sidebar.markdown("# Evaluation 🤔")
st.markdown('', unsafe_allow_html=True)
st.markdown('', unsafe_allow_html=True)

# Only models from these namespaces (plus non-namespaced models such as
# "gpt2") are offered in the model selector.
INCLUDED_USERS = ['google', 'EleutherAI', "bigscience",
                  "facebook", "openai", "microsoft"]
# Hub pipeline tags mapped to the transformers Auto* class family used
# by the evaluation backend.
PIPELINE_TAG_TO_TASKS = {
    'text-generation': "CausalLM",
    'text2text-generation': "Seq2SeqLM"}


@st.cache
def fetch_model_info_from_huggingface_api() -> Dict[str, str]:
    """Return a mapping of model id -> task name ("CausalLM"/"Seq2SeqLM").

    Queries the Hugging Face hub model listing and keeps only models
    whose pipeline tag is one of PIPELINE_TAG_TO_TASKS and whose
    namespace is in INCLUDED_USERS (models without a "/" namespace are
    always kept).

    Raises:
        requests.HTTPError: if the hub API responds with an error status.
    """
    # Single API call with a timeout; fail fast on an HTTP error instead
    # of handing an error page to .json().
    response = requests.get("https://huggingface.co/api/models", timeout=30)
    response.raise_for_status()

    model_to_pipeline_tag = {}
    for model in response.json():
        model_name = model['modelId']
        # Community models are namespaced "<user>/<model>"; skip any
        # whose user is not explicitly included.
        if "/" in model_name and model_name.split("/")[0] not in INCLUDED_USERS:
            continue
        if model.get("pipeline_tag") in PIPELINE_TAG_TO_TASKS:
            model_to_pipeline_tag[model['id']] = (
                PIPELINE_TAG_TO_TASKS[model["pipeline_tag"]])
    return model_to_pipeline_tag


model_to_auto_class = fetch_model_info_from_huggingface_api()

col1, col2 = st.columns([3, 2])
user_input = {}

with col1:
    st.header("Question")
    user_input['context'] = st.text_input(
        label='Write your question. You may explicitly mention the answer '
              'choices in the prompt.',
        value='Huggingface is awesome. True or False?')
    user_input['answer_choices_texts'] = st.text_input(
        label='Add answer choices in text separated by a comma and a space.',
        value='True, False')
    # The backend expects a list of choice strings, not the raw text.
    user_input['answer_choices_texts'] = (
        user_input['answer_choices_texts'].split(', '))

with col2:
    st.header("Model Config")
    user_input['model'] = st.selectbox(
        "Which model?", list(model_to_auto_class.keys()))
    user_input['auto_class'] = model_to_auto_class[user_input['model']]

col4, col5 = st.columns(2)

with col5:
    # style taken from https://css-tricks.com/css-hover-effects-background-masks-3d/
    st.markdown("""""", unsafe_allow_html=True)
    st.header("Submit task")
    submit = st.button('Submit')

with col4:
    st.header("Result")
    if submit:
        with st.spinner('Wait for it...'):
            # prediction is the index of the top-ranked answer choice.
            prediction = multi_inference_rank_eval(
                user_input['model'],
                user_input['auto_class'],
                user_input['answer_choices_texts'],
                user_input['context'])
        st.markdown(f"### {user_input['answer_choices_texts'][prediction]}")