from functools import lru_cache

from fastai.text.all import *
from fastcore.all import *
import matplotlib.cm as cm
import html
import gradio as gr

learn_inf = load_learner("20210928-model.pkl")


def _value2rgba(x, cmap=cm.RdYlGn, alpha_mult=1.0):
    "Convert a value `x` from 0 to 1 (inclusive) to an RGBA tuple according to `cmap`, with transparency scaled by `alpha_mult`."
    c = cmap(x)
    rgb = (np.array(c[:-1]) * 255).astype(int)
    a = c[-1] * alpha_mult
    return tuple(rgb.tolist() + [a])


def _eval_dropouts(mod):
    # Recursively switch dropout and batch-norm layers to eval behaviour so
    # gradients can be taken through a model otherwise left in train mode.
    module_name = mod.__class__.__name__
    if "Dropout" in module_name or "BatchNorm" in module_name:
        mod.training = False
    for module in mod.children():
        _eval_dropouts(module)


def _piece_attn_html(pieces, attns, sep=" ", **kwargs):
    # Wrap each token in a <span> whose background colour encodes its
    # attention score; the raw score is exposed via the `title` tooltip.
    html_code, spans = ['<span style="font-family: monospace;">'], []
    for p, a in zip(pieces, attns):
        p = html.escape(p)
        c = str(_value2rgba(a, alpha_mult=0.5, **kwargs))
        spans.append(
            f'<span title="{a:.3f}" style="background-color: rgba{c};">{p}</span>'
        )
    html_code.append(sep.join(spans))
    html_code.append("</span>")
    return "".join(html_code)


@lru_cache(maxsize=1024 * 2)
def _intrinsic_attention(learn, text, class_id=None):
    "Calculate the intrinsic attention of the input w.r.t. an output `class_id`, or the classification given by the model if `None`."
    learn.model.train()
    _eval_dropouts(learn.model)
    learn.model.zero_grad()
    learn.model.reset()
    dl = learn.dls.test_dl([text])
    batch = next(iter(dl))[0]
    emb = learn.model[0].module.encoder(batch).detach().requires_grad_(True)
    emb.retain_grad()
    lstm = learn.model[0].module(emb, True)
    learn.model.eval()
    cl = learn.model[1]((lstm, torch.zeros_like(batch).bool()))[0].softmax(dim=-1)
    if class_id is None:
        class_id = cl.argmax()
    cl[0][class_id].backward()
    # Attention score = L1 norm of each token embedding's gradient,
    # normalised so the largest score is 1.
    attn = emb.grad.squeeze().abs().sum(dim=-1)
    attn /= attn.max()
    tok, _ = learn.dls.decode_batch((*tuplify(batch), *tuplify(cl)))[0]
    return tok, attn


@patch
def intrinsic_attention(x: TextLearner, text: str, class_id: int = None, **kwargs):
    "Show the intrinsic attention for `text`, with an optional `class_id`."
    if isinstance(x, LMLearner):
        raise Exception("Language models are not supported")
    text, attn = _intrinsic_attention(x, text, class_id)
    return _piece_attn_html(text.split(), to_np(attn), **kwargs)


labels = learn_inf.dls.vocab[1]


@lru_cache(maxsize=1024 * 2)
def predict_label(title):
    *_, probs = learn_inf.predict(title)
    return probs


def predict(title):
    # Probabilities are cached per title via `predict_label`; the attention
    # HTML is cached separately inside `_intrinsic_attention`.
    probs = predict_label(title)
    return learn_inf.intrinsic_attention(title), {
        labels[i]: float(probs[i]) for i in range(len(labels))
    }
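
# Optional smoke test for the pieces above (an illustrative sketch, not
# invoked at import time): `predict` should return an HTML string of
# highlighted tokens plus a {label: probability} mapping over `labels`.
def _smoke_test(title="The Adventures of Oliver Twist"):
    attn_html, scores = predict(title)
    assert attn_html.startswith("<span"), "expected HTML-rendered attention"
    assert abs(sum(scores.values()) - 1.0) < 1e-4, "probabilities should sum to 1"
    return attn_html, scores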

sample_text = [
    [
        "Poems on various subjects. Whereto is prefixed a short essay on the structure of English verse"
    ],
    [
        "Journal of a Residence in China and the neighbouring countries from 1830 to 1833. With an introductory essay by the Hon. and Rev. Baptist Wriothesley Noel. [With a map.]"
    ],
    ["The Adventures of Oliver Twist. [With plates.]"],
    ["['The Adventures of Sherlock Holmes', 'Single Works']"],
    [
        "['Coal, Iron, and Oil; or, the Practical American miner. A plain and popular work on our mines and mineral resources ... With numerous maps and engravings, etc']"
    ],
    [
        "Summer Travelling in Iceland; being the narrative of two journeys across the island ... With a chapter on Askja by E. Delmar Morgan ... Containing also a literal translation of three sagas. Maps, etc'"
    ],
    [
        "Histoire de France au moyen âge, depuis Philippe-Auguste jusqu'à la fin du règne de Louis XI. 1223-1483. Troisième édition"
    ],
    [
        "Two Centuries of Soho: its institutions, firms, and amusements. By the Clergy of St. Anne's, Soho, J. H. Cardwell ... H. B. Freeman ... G. C. Wilton ... assisted by other contributors, etc"
    ],
    ["A Christmas Carol"],
]

description = """
British Library Books genre detection model
"""

article = """
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5245175.svg)](https://doi.org/10.5281/zenodo.5245175)

# British Library Books genre detection demo

This demo allows you to play with a 'genre' detection model which has been trained to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'.

The model was trained with the [fastai](https://docs.fast.ai/) library on training data drawn from [digitised books](https://www.bl.uk/collection-guides/digitised-printed-books) at the British Library. These books are mainly from the 19th century.

The demo also shows you which parts of the input the model relies on most to make its prediction. You can hover over a word to see the attention score assigned to it. This gives you some sense of which words are important to the model in making a prediction.

The examples include titles from the BL books collection. You may notice that the model makes mistakes on short titles in particular; this can partly be explained by the title format in the original data. For example, the novel *'Vanity Fair'* by William Makepeace Thackeray is found in the training data as:

```
Vanity Fair. A novel without a hero ... With all the original illustrations by the author, etc
```

You can see that the model gets a bit of help with the genre here 😉. Since the model was trained for a very particular dataset and task, it might not work well on titles that don't match this original corpus.

## XXMAJ?

You may see some strange tokens in the output. These are tokens used by fastai to indicate particular things about the text. `xxmaj` indicates that the next word begins with a capital letter in the original text, and `xxbos` indicates the beginning of a sentence. These tokens can be quite important in helping the model make predictions. As an example, you can try `oliver twist` and `Oliver Twist` and see how the results of the model change.
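
With fastai's default tokenizer, the two versions produce different token sequences (a sketch of the expected output; exact tokens can vary with tokenizer settings):

```
oliver twist  ->  xxbos oliver twist
Oliver Twist  ->  xxbos xxmaj oliver xxmaj twist
```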

## Background

This model was developed as part of work by the [Living with Machines](https://livingwithmachines.ac.uk/) project. The process of training the model and working with the data is documented in a tutorial which will be released soon.

## Model description

This model is intended to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'. It was trained on data created from the [Digitised printed books (18th-19th Century)](https://www.bl.uk/collection-guides/digitised-printed-books) book collection. This dataset is dominated by English-language books, though it includes books in several other languages in much smaller numbers.

The model was originally developed for use as part of the Living with Machines project, to 'segment' this large dataset of books into different categories based on a 'crude' classification of genre, i.e. whether the title was `fiction` or `non-fiction`. You can find more information about the model [here](https://doi.org/10.5281/zenodo.5245175).

## Training data

The model is trained on a particular collection of books digitised by the British Library. As a result, the model may do less well on titles that look different to this data. In particular, the training data was mostly English and mostly from the 19th century. The model is likely to do less well with non-English languages and with book titles that fall outside of the 19th century.

Since the data was derived from books catalogued by the British Library, it is also possible the model will perform less well for books held by other institutions if, for example, they catalogue book titles in different ways or have different biases in the types of books they hold.

## Model performance

The model's performance on a held-out test set is as follows:

```
              precision    recall  f1-score   support

     Fiction       0.91      0.88      0.90       296
 Non-fiction       0.94      0.95      0.95       554

    accuracy                           0.93       850
   macro avg       0.93      0.92      0.92       850
weighted avg       0.93      0.93      0.93       850
```

### Credits

> This work was partly supported by [Living with Machines](https://livingwithmachines.ac.uk/). This project, funded by the UK Research and Innovation (UKRI) Strategic Priority Fund, is a multidisciplinary collaboration delivered by the Arts and Humanities Research Council (AHRC), with The Alan Turing Institute, the British Library and the Universities of Cambridge, East Anglia, Exeter, and Queen Mary University of London.

> Code for showing attention was adapted from [Zachary Mueller's](https://github.com/muellerzr) [fastinference](https://muellerzr.github.io/fastinference/) library.
"""

# NB: gr.inputs/gr.outputs, `allow_screenshot`, and theme="huggingface" are
# the Gradio 2.x-era API this app was written against.
gr_interface = gr.Interface(
    fn=predict,
    inputs=gr.inputs.Textbox(),
    outputs=[
        gr.outputs.HTML("Intrinsic attention"),
        gr.outputs.Label(num_top_classes=len(labels), label="Confidence"),
    ],
    title="British Library 19th Century Books Genre Classifier",
    description=description,
    article=article,
    examples=sample_text,
    allow_screenshot=True,
    theme="huggingface",
)
gr_interface.launch(inline=False, share=False)
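
# For reference, a rough equivalent using the post-3.x Gradio API (an
# untested sketch; argument names may differ between Gradio releases):
#
# gr_interface = gr.Interface(
#     fn=predict,
#     inputs=gr.Textbox(label="Book title"),
#     outputs=[
#         gr.HTML(label="Intrinsic attention"),
#         gr.Label(num_top_classes=len(labels), label="Confidence"),
#     ],
#     title="British Library 19th Century Books Genre Classifier",
#     description=description,
#     article=article,
#     examples=sample_text,
# )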