import gradio as gr
import datetime
import json
import requests

from constants import *


def process(query_type, index_desc, **kwargs):
    '''Build the query payload and POST it to the infini-gram API backend.'''
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    index = INDEX_BY_DESC[index_desc]
    data = {
        'source': 'hf' if not DEBUG else 'hf-dev',
        'timestamp': timestamp,
        'query_type': query_type,
        'index': index,
    }
    data.update(kwargs)
    print(json.dumps(data))
    if API_URL is None:
        raise ValueError('API_URL envvar is not set!')
    try:
        response = requests.post(API_URL, json=data, timeout=10)
    except requests.exceptions.Timeout:
        raise ValueError('Web request timed out. Please try again later.')
    except requests.exceptions.RequestException as e:
        raise ValueError(f'Web request error: {e}')
    if response.status_code == 200:
        result = response.json()
    else:
        raise ValueError(f'HTTP error {response.status_code}: {response.json()}')
    if DEBUG:
        print(result)
    return result

def format_tokenization_info(result):
    if not ('token_ids' in result and 'tokens' in result):
        return ''
    token_ids = result['token_ids']
    tokens = result['tokens']
    if isinstance(token_ids, list) and all(isinstance(token_id, int) for token_id in token_ids):
        # Simple query: a single flat list of token IDs.
        output = '[' + ' '.join('"' + token.replace('Ġ', ' ') + '"' for token in tokens) + '] ' + str(token_ids)
    else:
        # CNF query: token_ids / tokens are nested lists (one sublist per clause,
        # one entry per term).
        clause_outputs = []
        for clause_token_ids, clause_tokens in zip(token_ids, tokens):
            term_outputs = []
            for term_token_ids, term_tokens in zip(clause_token_ids, clause_tokens):
                t = '[' + ' '.join('"' + token.replace('Ġ', ' ') + '"' for token in term_tokens) + '] ' + str(term_token_ids)
                term_outputs.append(t)
            clause_outputs.append('\n'.join(term_outputs))
        output = '\n\n'.join(clause_outputs)
    return output

def format_doc_metadata(doc):
    formatted = f'Document #{doc["doc_ix"]}\n'
    if doc['doc_len'] == doc['disp_len']:
        formatted += f'Length: {doc["doc_len"]} tokens\n'
    else:
        formatted += f'Length: {doc["doc_len"]} tokens ({doc["disp_len"]} tokens displayed)\n'
    metadata = doc['metadata'].strip('\n')
    formatted += f'Metadata: {metadata}'
    return formatted

def count(index_desc, query, max_clause_freq, max_diff_tokens):
    if ' AND ' in query or ' OR ' in query:  # CNF query
        result = process('count', index_desc, query=query, max_clause_freq=max_clause_freq, max_diff_tokens=max_diff_tokens)
    else:  # simple query
        result = process('count', index_desc, query=query)
    latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
    tokenization_info = format_tokenization_info(result)
    if 'error' in result:
        count = result['error']
    else:
        count = f'{result["count"]:,}'
    return latency, tokenization_info, count

def prob(index_desc, query):
    result = process('prob', index_desc, query=query)
    latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
    tokenization_info = format_tokenization_info(result)
    if 'error' in result:
        prob = result['error']
    elif result['prompt_cnt'] == 0:
        prob = '(n-1)-gram is not found in the corpus'
    else:
        prob = f'{result["prob"]:.4f} ({result["cont_cnt"]:,} / {result["prompt_cnt"]:,})'
    return latency, tokenization_info, prob

def ntd(index_desc, query, max_support):
    result = process('ntd', index_desc, query=query, max_support=max_support)
    latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
    tokenization_info = format_tokenization_info(result)
    if 'error' in result:
        ntd = result['error']
    else:
        result_by_token_id = result['result_by_token_id']
        ntd = {}
        for token_id, r in result_by_token_id.items():
            ntd[f'{r["token"]} ({r["cont_cnt"]} / {result["prompt_cnt"]})'] = r['prob']
        if ntd == {}:
            ntd = '(n-1)-gram is not found in the corpus'
    return latency, tokenization_info, ntd

def infgram_prob(index_desc, query):
    result = process('infgram_prob', index_desc, query=query)
    latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
    tokenization_info = format_tokenization_info(result)
    if 'error' in result:
        longest_suffix = ''
        prob = result['error']
    else:
        longest_suffix = result['longest_suffix']
        prob = f'{result["prob"]:.4f} ({result["cont_cnt"]:,} / {result["prompt_cnt"]:,})'
    return latency, tokenization_info, longest_suffix, prob
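# A minimal sketch (not used by the app) of the raw payload that `process`
# builds, for readers who want to hit the backend directly. The index name
# below is illustrative; valid names come from INDEX_BY_DESC in constants.py.
def _example_raw_count_request():
    payload = {
        'source': 'hf',
        'timestamp': datetime.datetime.now().strftime('%Y%m%d-%H%M%S'),
        'query_type': 'count',
        'index': 'v4_rpj_llama_s4',  # illustrative index name
        'query': 'natural language processing',
    }
    # Same call `process` makes; on success the backend returns a dict like
    # {'count': ..., 'latency': ..., 'token_ids': [...], 'tokens': [...]}.
    return requests.post(API_URL, json=payload, timeout=10).json()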
def infgram_ntd(index_desc, query, max_support):
    result = process('infgram_ntd', index_desc, query=query, max_support=max_support)
    latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
    tokenization_info = format_tokenization_info(result)
    if 'error' in result:
        longest_suffix = ''
        ntd = result['error']
    else:
        longest_suffix = result['longest_suffix']
        result_by_token_id = result['result_by_token_id']
        ntd = {}
        for token_id, r in result_by_token_id.items():
            ntd[f'{r["token"]} ({r["cont_cnt"]} / {result["prompt_cnt"]})'] = r['prob']
    return latency, tokenization_info, longest_suffix, ntd

def search_docs(index_desc, query, maxnum, max_disp_len, max_clause_freq, max_diff_tokens):
    if ' AND ' in query or ' OR ' in query:  # CNF query
        result = process('search_docs', index_desc, query=query, maxnum=maxnum, max_disp_len=max_disp_len, max_clause_freq=max_clause_freq, max_diff_tokens=max_diff_tokens)
    else:  # simple query
        result = process('search_docs', index_desc, query=query, maxnum=maxnum, max_disp_len=max_disp_len)
    latency = '' if 'latency' not in result else f'{result["latency"]:.3f}'
    tokenization_info = format_tokenization_info(result)
    if 'error' in result:
        message = result['error']
        metadatas = ['' for _ in range(MAXNUM)]
        docs = [[] for _ in range(MAXNUM)]
    else:
        message = result['message']
        metadatas = [format_doc_metadata(doc) for doc in result['documents']]
        docs = [doc['spans'] for doc in result['documents']]
        metadatas = metadatas[:maxnum]
        docs = docs[:maxnum]
    # Pad to a fixed number of slots: Gradio expects a constant-length output list.
    while len(metadatas) < MAXNUM:
        metadatas.append('')
    while len(docs) < MAXNUM:
        docs.append([])
    return tuple([latency, tokenization_info, message] + metadatas + docs)
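# A quick illustration of the tokenization payload consumed by
# format_tokenization_info() above, with made-up token IDs:
#
#   format_tokenization_info({'token_ids': [1, 2], 'tokens': ['Ġnatural', 'Ġlanguage']})
#   # -> '[" natural" " language"] [1, 2]'
#
# For CNF queries, 'token_ids' and 'tokens' are instead nested lists (one
# sublist per clause, one entry per term), and the formatted terms are
# joined with newlines.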

with gr.Blocks() as demo:
    with gr.Column():
        gr.HTML(
            '''<h1>Infini-gram: An Engine for n-gram / ∞-gram Language Modeling with Trillion-Token Corpora</h1>
            <p>This is an engine that processes n-gram / ∞-gram queries on massive text corpora. Please first select the corpus and the type of query, then enter your query and submit.</p>
            <p>The engine is developed by Jiacheng (Gary) Liu and documented in our paper: <a href="https://arxiv.org/abs/2401.17377">Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens</a>. Feel free to check out our Project Homepage.</p>
            <p>API Endpoint: If you'd like to issue batch queries to infini-gram, you may invoke our API endpoint. Please refer to the API documentation.</p>
            <p>Note: The query is case-sensitive. Your query will be tokenized with the Llama-2 tokenizer (unless otherwise specified).</p>
            '''
        )
        with gr.Row():
            with gr.Column(scale=1, min_width=240):
                index_desc = gr.Radio(choices=INDEX_DESCS, label='Corpus', value=INDEX_DESCS[0])
            with gr.Column(scale=7):
                with gr.Tab('1. Count an n-gram'):
                    with gr.Column():
                        gr.HTML('<h2>1. Count an n-gram</h2>')
                        with gr.Accordion(label='Click to view instructions', open=False):
                            gr.HTML(f'''

                                <p>This counts the number of times an n-gram appears in the corpus. If you submit an empty input, it will return the total number of tokens in the corpus. You can also make more complex queries by connecting multiple n-gram terms with the AND/OR operators, in the CNF format.</p>
                                <p>Example queries:</p>
                                <p>Notes on CNF queries:</p>
                            ''')
                        with gr.Row():
                            with gr.Column(scale=1):
                                count_query = gr.Textbox(placeholder='Enter a string (an n-gram) here', label='Query', interactive=True)
                                with gr.Accordion(label='Advanced options', open=False):
                                    with gr.Row():
                                        count_max_clause_freq = gr.Slider(minimum=1, maximum=MAX_CLAUSE_FREQ, value=max_clause_freq, step=1, label='max_clause_freq')
                                        count_max_diff_tokens = gr.Slider(minimum=1, maximum=MAX_DIFF_TOKENS, value=max_diff_tokens, step=1, label='max_diff_tokens')
                                with gr.Row():
                                    count_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
                                    count_submit = gr.Button(value='Submit', variant='primary', visible=True)
                                count_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
                                count_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
                            with gr.Column(scale=1):
                                count_count = gr.Label(label='Count', num_top_classes=0)
                        count_clear.add([count_query, count_latency, count_tokenized, count_count])
                        count_submit.click(count, inputs=[index_desc, count_query, count_max_clause_freq, count_max_diff_tokens], outputs=[count_latency, count_tokenized, count_count], api_name=False)
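                # Note: the `count` handler (defined above) routes any query
                # containing ' AND ' / ' OR ' through the CNF code path, so the
                # two sliders under "Advanced options" only take effect for
                # CNF queries.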

                with gr.Tab('2. Prob of the last token'):
                    with gr.Column():
                        gr.HTML('<h2>2. Compute the probability of the last token in an n-gram</h2>')
                        with gr.Accordion(label='Click to view instructions', open=False):
                            gr.HTML(f'''

                                <p>This computes the n-gram probability of the last token conditioned on the previous tokens (i.e. the (n-1)-gram).</p>
                                <p>Example query: natural language processing (the output is P(processing | natural language), obtained by counting the 3-gram "natural language processing" and the 2-gram "natural language", and taking the ratio of the two counts)</p>
                                <p>Notes:</p>
                            ''')
                        with gr.Row():
                            with gr.Column(scale=1):
                                prob_query = gr.Textbox(placeholder='Enter a string (an n-gram) here', label='Query', interactive=True)
                                with gr.Row():
                                    prob_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
                                    prob_submit = gr.Button(value='Submit', variant='primary', visible=True)
                                prob_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
                                prob_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
                            with gr.Column(scale=1):
                                prob_probability = gr.Label(label='Probability', num_top_classes=0)
                        prob_clear.add([prob_query, prob_latency, prob_tokenized, prob_probability])
                        prob_submit.click(prob, inputs=[index_desc, prob_query], outputs=[prob_latency, prob_tokenized, prob_probability], api_name=False)
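                # Worked example (hypothetical counts): if "natural language
                # processing" occurs 1,000 times and "natural language" occurs
                # 5,000 times, this tab reports
                # P(processing | natural language) = 1000 / 5000 = 0.2000.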

                with gr.Tab('3. Next-token distribution'):
                    with gr.Column():
                        gr.HTML('<h2>3. Compute the next-token distribution of an (n-1)-gram</h2>')
                        with gr.Accordion(label='Click to view instructions', open=False):
                            gr.HTML(f'''

                                <p>This is an extension of Query Type 2: it interprets your input as the (n-1)-gram and gives you the full next-token distribution.</p>
                                <p>Example query: natural language (the output is P(* | natural language), for the top-10 tokens *)</p>
                                <p>Notes:</p>
                            ''')
                        with gr.Row():
                            with gr.Column(scale=1):
                                ntd_query = gr.Textbox(placeholder='Enter a string (an (n-1)-gram) here', label='Query', interactive=True)
                                with gr.Accordion(label='Advanced options', open=False):
                                    ntd_max_support = gr.Slider(minimum=1, maximum=MAX_SUPPORT, value=MAX_SUPPORT, step=1, label='max_support')
                                with gr.Row():
                                    ntd_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
                                    ntd_submit = gr.Button(value='Submit', variant='primary', visible=True)
                                ntd_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
                                ntd_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
                            with gr.Column(scale=1):
                                ntd_distribution = gr.Label(label='Distribution', num_top_classes=10)
                        ntd_clear.add([ntd_query, ntd_latency, ntd_tokenized, ntd_distribution])
                        ntd_submit.click(ntd, inputs=[index_desc, ntd_query, ntd_max_support], outputs=[ntd_latency, ntd_tokenized, ntd_distribution], api_name=False)
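                # The `ntd` handler returns a {label: prob} dict, which gr.Label
                # renders as a ranked list; e.g. a key like 'processing (1000 / 5000)'
                # (hypothetical counts) maps to its probability 0.2.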

                with gr.Tab('4. ∞-gram prob'):
                    with gr.Column():
                        gr.HTML('<h2>4. Compute the ∞-gram probability of the last token</h2>')
                        with gr.Accordion(label='Click to view instructions', open=False):
                            gr.HTML(f'''

                                <p>This computes the ∞-gram probability of the last token conditioned on the previous tokens. Compared to Query Type 2 (which uses your entire input for n-gram modeling), here we condition on the longest suffix of the prompt that can be found in the corpus.</p>
                                <p>Example query: I love natural language processing (if "natural language" appears in the corpus but "love natural language" doesn't, the output is P(processing | natural language); in this case the effective n = 3)</p>
                                <p>Notes:</p>
                            ''')
                        with gr.Row():
                            with gr.Column(scale=1):
                                infgram_prob_query = gr.Textbox(placeholder='Enter a string here', label='Query', interactive=True)
                                with gr.Row():
                                    infgram_prob_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
                                    infgram_prob_submit = gr.Button(value='Submit', variant='primary', visible=True)
                                infgram_prob_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
                                infgram_prob_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
                                infgram_prob_longest_suffix = gr.Textbox(label='Longest Found Suffix', interactive=False)
                            with gr.Column(scale=1):
                                infgram_prob_probability = gr.Label(label='Probability', num_top_classes=0)
                        infgram_prob_clear.add([infgram_prob_query, infgram_prob_latency, infgram_prob_tokenized, infgram_prob_longest_suffix, infgram_prob_probability])
                        infgram_prob_submit.click(infgram_prob, inputs=[index_desc, infgram_prob_query], outputs=[infgram_prob_latency, infgram_prob_tokenized, infgram_prob_longest_suffix, infgram_prob_probability], api_name=False)
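                # ∞-gram backoff in brief: the backend finds the longest suffix
                # of the prompt that occurs in the corpus (surfaced in the
                # "Longest Found Suffix" box) and conditions only on that
                # suffix, so the effective n varies per query.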

                with gr.Tab('5. ∞-gram next-token distribution'):
                    with gr.Column():
                        gr.HTML('<h2>5. Compute the ∞-gram next-token distribution</h2>')
                        with gr.Accordion(label='Click to view instructions', open=False):
                            gr.HTML(f'''

                                <p>This is similar to Query Type 3, but with ∞-gram instead of n-gram.</p>
                                <p>Example query: I love natural language (if "natural language" appears in the corpus but "love natural language" doesn't, the output is P(* | natural language), for the top-10 tokens *)</p>
                            ''')
                        with gr.Row():
                            with gr.Column(scale=1):
                                infgram_ntd_query = gr.Textbox(placeholder='Enter a string here', label='Query', interactive=True)
                                with gr.Accordion(label='Advanced options', open=False):
                                    infgram_ntd_max_support = gr.Slider(minimum=1, maximum=MAX_SUPPORT, value=MAX_SUPPORT, step=1, label='max_support')
                                with gr.Row():
                                    infgram_ntd_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
                                    infgram_ntd_submit = gr.Button(value='Submit', variant='primary', visible=True)
                                infgram_ntd_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
                                infgram_ntd_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
                                infgram_ntd_longest_suffix = gr.Textbox(label='Longest Found Suffix', interactive=False)
                            with gr.Column(scale=1):
                                infgram_ntd_distribution = gr.Label(label='Distribution', num_top_classes=10)
                        infgram_ntd_clear.add([infgram_ntd_query, infgram_ntd_latency, infgram_ntd_tokenized, infgram_ntd_longest_suffix, infgram_ntd_distribution])
                        infgram_ntd_submit.click(infgram_ntd, inputs=[index_desc, infgram_ntd_query, infgram_ntd_max_support], outputs=[infgram_ntd_latency, infgram_ntd_tokenized, infgram_ntd_longest_suffix, infgram_ntd_distribution], api_name=False)
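                # Tab 6 below renders a fixed number (MAXNUM) of document
                # slots; `search_docs` pads its outputs accordingly so Gradio
                # always receives the same number of components regardless of
                # how many documents matched.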

                with gr.Tab('6. Search documents'):
                    with gr.Column():
                        gr.HTML('<h2>6. Search for documents containing n-gram(s)</h2>')
                        with gr.Accordion(label='Click to view instructions', open=False):
                            gr.HTML(f'''

                                <p>This displays a few random documents in the corpus that satisfy your query. You can simply enter an n-gram, in which case each displayed document contains that n-gram. You can also connect multiple n-gram terms with the AND/OR operators, in the CNF format, in which case each displayed document satisfies the full logical constraint.</p>
                                <p>Example queries:</p>
                                <p>If you want another batch of random documents, simply hit the Submit button again :)</p>
                                <p>Notes on CNF queries:</p>
                                <p>❗️WARNING: The corpus may contain problematic content such as PII, toxicity, hate speech, and NSFW text. This tool merely presents selected text from the corpus, without any post-hoc safety filtering; it does NOT create new text. This is a research prototype through which we can expose and examine existing problems with massive text corpora. Please use with caution. Don't be evil :)</p>
                            ''')
                        with gr.Row():
                            with gr.Column(scale=1):
                                search_docs_query = gr.Textbox(placeholder='Enter a query here', label='Query', interactive=True)
                                search_docs_maxnum = gr.Slider(minimum=1, maximum=MAXNUM, value=maxnum, step=1, label='Number of documents to display')
                                search_docs_max_disp_len = gr.Slider(minimum=1, maximum=MAX_DISP_LEN, value=max_disp_len, step=1, label='Number of tokens to display')
                                with gr.Accordion(label='Advanced options', open=False):
                                    with gr.Row():
                                        search_docs_max_clause_freq = gr.Slider(minimum=1, maximum=MAX_CLAUSE_FREQ, value=max_clause_freq, step=1, label='max_clause_freq')
                                        search_docs_max_diff_tokens = gr.Slider(minimum=1, maximum=MAX_DIFF_TOKENS, value=max_diff_tokens, step=1, label='max_diff_tokens')
                                with gr.Row():
                                    search_docs_clear = gr.ClearButton(value='Clear', variant='secondary', visible=True)
                                    search_docs_submit = gr.Button(value='Submit', variant='primary', visible=True)
                                search_docs_latency = gr.Textbox(label='Latency (milliseconds)', interactive=False, lines=1)
                                search_docs_tokenized = gr.Textbox(label='Tokenized', lines=1, interactive=False)
                            with gr.Column(scale=2):
                                search_docs_message = gr.Label(label='Message', num_top_classes=0)
                                search_docs_metadatas = []
                                search_docs_outputs = []
                                for i in range(MAXNUM):
                                    with gr.Tab(label=str(i+1)):
                                        search_docs_metadatas.append(gr.Textbox(label='Metadata', lines=3, interactive=False))
                                        search_docs_outputs.append(gr.HighlightedText(label='Document', show_legend=False, color_map={"-": "red", "0": "green", "1": "cyan", "2": "blue", "3": "magenta"}))
                        search_docs_clear.add([search_docs_query, search_docs_latency, search_docs_tokenized, search_docs_message] + search_docs_metadatas + search_docs_outputs)
                        search_docs_submit.click(search_docs, inputs=[index_desc, search_docs_query, search_docs_maxnum, search_docs_max_disp_len, search_docs_max_clause_freq, search_docs_max_diff_tokens], outputs=[search_docs_latency, search_docs_tokenized, search_docs_message] + search_docs_metadatas + search_docs_outputs, api_name=False)

        with gr.Row():
            gr.Markdown('''
If you find this tool useful, please kindly cite our paper:

```bibtex
@article{Liu2024InfiniGram,
  title={Infini-gram: Scaling Unbounded n-gram Language Models to a Trillion Tokens},
  author={Liu, Jiacheng and Min, Sewon and Zettlemoyer, Luke and Choi, Yejin and Hajishirzi, Hannaneh},
  journal={arXiv preprint arXiv:2401.17377},
  year={2024}
}
```
''')

demo.queue(
    default_concurrency_limit=DEFAULT_CONCURRENCY_LIMIT,
    max_size=MAX_SIZE,
    api_open=False,
).launch(
    max_threads=MAX_THREADS,
    debug=DEBUG,
    show_api=False,
)