machineuser committed
Commit c011cc0
1 Parent(s): dda6509

commit the build, and set as Space

.eslintcache CHANGED
@@ -1 +1 @@
- [{"/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/index.js":"1","/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/reportWebVitals.js":"2","/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/App.js":"3","/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/Instructions.js":"4","/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/InputField.js":"5","/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/Section.js":"6"},{"size":500,"mtime":1607266233636,"results":"7","hashOfConfig":"8"},{"size":362,"mtime":1607349763851,"results":"9","hashOfConfig":"8"},{"size":17671,"mtime":1607514495570,"results":"10","hashOfConfig":"8"},{"size":26840,"mtime":1607425868280,"results":"11","hashOfConfig":"8"},{"size":578,"mtime":1607425397810,"results":"12","hashOfConfig":"8"},{"size":634,"mtime":1607513652797,"results":"13","hashOfConfig":"8"},{"filePath":"14","messages":"15","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0,"usedDeprecatedRules":"16"},"d40mw9",{"filePath":"17","messages":"18","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0,"usedDeprecatedRules":"16"},{"filePath":"19","messages":"20","errorCount":0,"warningCount":3,"fixableErrorCount":0,"fixableWarningCount":0,"source":null},{"filePath":"21","messages":"22","errorCount":0,"warningCount":3,"fixableErrorCount":0,"fixableWarningCount":0,"source":"23","usedDeprecatedRules":"16"},{"filePath":"24","messages":"25","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0,"usedDeprecatedRules":"16"},{"filePath":"26","messages":"27","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0},"/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/index.js",[],["28","29"],"/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/reportWebVitals.js",[],"/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/App.js",["30","31","32"],"/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/Instructions.js",["33","34","35"],"const NAME = 'Instructions'\n\nexport default {\n name: NAME,\n instructions: {\n yamlTags: {\n paragraph: [\n \"Add YAML tags\"\n ],\n example: [\n \"---\",\n `annotations_creators:`,\n `- no-annotation`,\n `language_creators:`,\n `- found`,\n `languages:`,\n `- en`,\n `licenses:`,\n `- unknown`,\n `multilinguality:`,\n `- monolingual`,\n `size_categories:`,\n `- 100K<n<1M`,\n `source_datasets:`,\n `- original`,\n `task_categories:`,\n `- question-answering`,\n `task_ids:`,\n `- abstractive-qa`,\n `- open-domain-qa`,\n `---`,\n ]\n },\n homepage: {\n paragraph: [\n \"Add homepage URL here if available (unless it's a GitHub repository)\"\n ],\n example: [\n \"[ELI5 homepage](https://facebookresearch.github.io/ELI5/explore.html)\"\n ]\n },\n repository: {\n paragraph: [\n \"If the dataset is hosted on github or has a github homepage, add URL here\"\n ],\n example: [\n \"[ELI5 repository](https://github.com/facebookresearch/ELI5)\"\n ]\n }, \n paper: {\n paragraph: [\n \"If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)\"\n ],\n example: [\n \"[ELI5: Long Form Question Answering](https://arxiv.org/abs/1907.09190)\"\n ]\n }, \n leaderboard: {\n paragraph: [\n \"If the dataset supports an active leaderboard, add link here\"\n ],\n example: [\n \"[N/A]\"\n ]\n }, \n contact: {\n paragraph: [\n \"If known, name and email of at least one person the 
reader can contact for questions about the dataset.\"\n ],\n example: [\n \"[Yacine Jernite](mailto:yacine@huggingface.co)\"\n ]\n }, \n datasetSummary: {\n paragraph: [\n \"Briefly summarize the dataset, its intended use and the supported tasks. Give an overview of how and why the dataset was created. The summary should explicitly mention the languages present in the dataset (possibly in broad terms, e.g. translations between several pairs of European languages), and describe the domain, topic, or genre covered.\"\n ],\n example: [\n \"The ELI5 dataset is an English-language dataset of questions and answers gathered from three subreddits were users ask factual questions requiring paragraph-length or longer answers. The dataset was created to support the task of open-domain long form abstractive question answering, and covers questions about general topics in its [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) subset, science in it [r/askscience](https://www.reddit.com/r/askscience/) subset, and History in its [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subset.\"\n ]\n },\n supportedTasks: {\n paragraph: [\n \"For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (repace the `task-category-tag` with an appropriate `other:other-task-name`).\",\n \"- `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION]. Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score. *[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name).\"\n ],\n example: [\n \"- `abstractive-qa`, `open-domain-qa`: The dataset can be used to train a model for Open Domain Long Form Question Answering. An LFQA model is presented with a non-factoid and asked to retrieve relevant information from a knowledge source (such as [Wikipedia](https://www.wikipedia.org/)), then use it to generate a multi-sentence answer. The model performance is measured by how high its [ROUGE](https://huggingface.co/metrics/rouge) score to the reference is. A [BART-based model](https://huggingface.co/yjernite/bart_eli5) with a [dense retriever](https://huggingface.co/yjernite/retribert-base-uncased) trained to draw information from [Wikipedia passages](https://huggingface.co/datasets/wiki_snippets) achieves a [ROUGE-L of 0.149](https://yjernite.github.io/lfqa.html#generation).\"\n ]\n },\n languages: {\n paragraph: [\n \"Provide a brief overview of the languages represented in the dataset. 
Describe relevant details about specifics of the language such as whether it is social media text, African American English,...\",\n \"When relevant, please provide [BCP-47 codes](https://tools.ietf.org/html/bcp47), which consist of a [primary language subtag](https://tools.ietf.org/html/bcp47#section-2.2.1), with a [script subtag](https://tools.ietf.org/html/bcp47#section-2.2.3) and/or [region subtag](https://tools.ietf.org/html/bcp47#section-2.2.4) if available.\"\n ],\n example: [\n \"The text in the dataset is in English, as spoken by Reddit users on the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits. The associated BCP-47 code is `en`.\"\n ]\n },\n dataInstances: {\n paragraph: [\n \"Provide an JSON-formatted example and brief description of a typical instance in the dataset. If available, provide a link to further examples.\",\n `\n {\n 'example_field': ...,\n ...\n }\n `,\n \"Provide any additional information that is not covered in the other sections about the data here. In particular describe any relationships between data points and if these relationships are made explicit.\",\n ],\n example: [\n \"A typical data point comprises a question, with a `title` containing the main question and a `selftext` which sometimes elaborates on it, and a list of answers from the forum sorted by the number of upvotes they obtained. Additionally, the URLs in each of the text fields have been extracted to respective lists and replaced by generic tokens in the text.\",\n \"An example from the ELI5 test set looks as follows:\",\n `{'q_id': '8houtx'`,\n `'title': 'Why does water heated to room temperature feel colder than the air around it?'`,\n `'selftext': ''`,\n `'document': ''`,\n `'subreddit': 'explainlikeimfive'`,\n `'answers': {'a_id': ['dylcnfk', 'dylcj49']`,\n `'text': [\"Water transfers heat more efficiently than air. When something feels cold it's because heat is being transferred from your skin to whatever you're touching. Since water absorbs the heat more readily than air, it feels colder.\",\n \"Air isn't as good at transferring heat compared to something like water or steel (sit on a room temperature steel bench vs. a room temperature wooden bench, and the steel one will feel more cold).\\n\\nWhen you feel cold, what you're feeling is heat being transferred out of you. If there is no breeze, you feel a certain way. If there's a breeze, you will get colder faster (because the moving air is pulling the heat away from you), and if you get into water, its quite good at pulling heat from you. Get out of the water and have a breeze blow on you while you're wet, all of the water starts evaporating, pulling even more heat from you.\"]`,\n `'score': [5, 2]}`,\n `'title_urls': {'url': []}`,\n `'selftext_urls': {'url': []}`,\n `'answers_urls': {'url': []}}`,\n ]\n },\n dataFields: {\n paragraph: [\n \"List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. 
If the datasets contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.\",\n \"- `example_field`: description of `example_field`\"\n ], \n example: [\n `- q_id: a string question identifier for each example, corresponding to its ID in the [Pushshift.io](https://files.pushshift.io/reddit/submissions/) Reddit submission dumps.`,\n `- subreddit: One of explainlikeimfive, askscience, or AskHistorians, indicating which subreddit the question came from`,\n `- title: title of the question, with URLs extracted and replaced by URL_n tokens`,\n `- title_urls: list of the extracted URLs, the nth element of the list was replaced by URL_n`,\n `- selftext: either an empty string or an elaboration of the question`,\n `- selftext_urls: similar to title_urls but for self_text`,\n `- answers: a list of answers, each answer has:`,\n `- a_id: a string answer identifier for each answer, corresponding to its ID in the [Pushshift.io](https://files.pushshift.io/reddit/comments/) Reddit comments dumps.`,\n `- text: the answer text with the URLs normalized`,\n `- score: the number of upvotes the answer had received when the dumps were created`,\n `- answers_urls: a list of the extracted URLs. All answers use the same list, the numbering of the normalization token continues across answer texts`,\n ]\n },\n dataSplits: {\n paragraph: [\n \"Describe and name the splits in the dataset if there are more than one.\",\n \"Describe any criteria for splitting the data, if used. If their are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.\",\n \"Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:\",\n `\tTain\tValid\tTest\n Input Sentences \t\n Average Sentence Length`,\n ],\n example: [\n \"The data is split into a training, validation and test set for each of the three subreddits. In order to avoid having duplicate questions in across sets, the `title` field of each of the questions were ranked by their tf-idf match to their nearest neighbor and the ones with the smallest value were used in the test and validation sets. The final split sizes are as follow:\",\n `\tTain\tValid\tTest\n r/explainlikeimfive examples\t272634\t9812\t24512\n r/askscience examples\t131778\t2281\t4462\n r/AskHistorians examples\t98525\t4901\t9764`,\n ]\n },\n curationRationale: {\n paragraph: [\n \"What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together?\",\n ],\n example: [\n \"ELI5 was built to provide a testbed for machines to learn how to answer more complex questions, which requires them to find and combine information in a coherent manner. The dataset was built by gathering questions that were asked by community members of three subreddits, including [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), along with the answers that were provided by other users. 
The [rules of the subreddit](https://www.reddit.com/r/explainlikeimfive/wiki/detailed_rules) make this data particularly well suited to training a model for abstractive question answering: the questions need to seek an objective explanation about well established facts, and the answers provided need to be understandable to a layperson without any particular knowledge domain.\"\n ]\n },\n dataCollection: {\n paragraph: [\n \"Describe the data collection process. Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.\",\n \"If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).\",\n \"If the data was modified or normalized after being collected (e.g. if the data is word-tokenized), describe the process and the tools used.\"\n ],\n example:[\n \"The data was obtained by filtering submissions and comments from the subreddits of interest from the XML dumps of the [Reddit forum](https://www.reddit.com/) hosted on [Pushshift.io](https://files.pushshift.io/reddit/).\",\n \"In order to further improve the quality of the selected examples, only questions with a score of at least 2 and at least one answer with a score of at least 2 were selected for the dataset. The dataset questions and answers span a period form August 2012 to August 2019.\"\n ]\n },\n sourceLanguage: {\n paragraph: [\n \"State whether the data was produced by humans or machine generated. Describe the people or systems who originally created the data.\",\n \"If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender.\",\n \"Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.\",\n \"Describe other people represented or mentioned in the data. Where possible, link to references for the information.\"\n ],\n example: [\n \"The language producers are users of the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits between 2012 and 2019. No further demographic information was available from the data source.\"\n ]\n },\n annotations: {\n paragraph: [\n \"If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs.\"\n ],\n example: [\n \"The dataset does not contain any additional annotations.\"\n ]\n },\n annotationProcess: {\n paragraph: [\n \"If applicable, describe the annotation process and any tools used, or state otherwise. Describe the amount of data annotated, if not all. Describe or reference annotation guidelines provided to the annotators. If available, provide interannotator statistics. 
Describe any annotation validation processes.\"\n ],\n example: [\n \"[N/A]\"\n ]\n },\n annotators: {\n paragraph: [\n \"If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated.\",\n \"Describe the people or systems who originally created the annotations and their selection criteria if applicable.\",\n \"If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender.\",\n \"Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.\"\n ],\n example: [\n \"[N/A]\"\n ]\n },\n personalInformation: {\n paragraph: [\n \"State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).\",\n \"State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).\",\n \"If efforts were made to anonymize the data, describe the anonymization process.\"\n ],\n example: [\n \"The authors removed the speaker IDs from the [Pushshift.io](https://files.pushshift.io/reddit/) dumps but did not otherwise anonymize the data. Some of the questions and answers are about contemporary public figures or individuals who appeared in the news.\"\n ]\n },\n socialImpact: {\n paragraph: [\n \"The purpose of this dataset is to help develop better question answering systems.\",\n \"The statement should include both positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to people who are affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations.\",\n \"Please also mention in this section if the proposed dataset contains a *low-resource* or under-represented language.\"\n ],\n example: [\n \"The purpose of this dataset is to help develop better question answering systems.\",\n \"A system that succeeds at the supported task would be able to provide a coherent answer to even complex questions requiring a multi-step explanation, which is beyond the ability of even the larger existing models. 
The task is also thought as a test-bed for retrieval model which can show the users which source text was used in generating the answer and allow them to confirm the information provided to them.\",\n \"It should be noted however that the provided answers were written by Reddit users, an information which may be lost if models trained on it are deployed in down-stream applications and presented to users without context. The specific biases this may introduce are discussed in the next section.\"\n ]\n },\n biasesDiscussion: {\n paragraph: [\n \"Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact.\",\n \"For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic.\",\n \"If analyses have been run quantifying these biases, please add brief summaries and links to the studies here.\"\n ],\n example: [\n \"While Reddit hosts a number of thriving communities with high quality discussions, it is also widely known to have corners where sexism, hate, and harassment are significant issues. See for example the [recent post from Reddit founder u/spez](https://www.reddit.com/r/announcements/comments/gxas21/upcoming_changes_to_our_content_policy_our_board/) outlining some of the ways he thinks the website's historical policies have been responsible for this problem, [Adrienne Massanari's 2015 article on GamerGate](https://www.researchgate.net/publication/283848479_Gamergate_and_The_Fappening_How_Reddit's_algorithm_governance_and_culture_support_toxic_technocultures) and follow-up works, or a [2019 Wired article on misogyny on Reddit](https://www.wired.com/story/misogyny-reddit-research/).\",\n \"While there has been some recent work in the NLP community on *de-biasing* models (e.g. [Black is to Criminal as Caucasian is to Police: Detecting and Removing Multiclass Bias in Word Embeddings](https://arxiv.org/abs/1904.04047) for word embeddings trained specifically on Reddit data), this problem is far from solved, and the likelihood that a trained model might learn the biases present in the data remains a significant concern.\",\n `We still note some encouraging signs for all of these communities: [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) and [r/askscience](https://www.reddit.com/r/askscience/) have similar structures and purposes, and [r/askscience](https://www.reddit.com/r/askscience/) was found in 2015 to show medium supportiveness and very low toxicity when compared to other subreddits (see a [hackerfall post](https://hackerfall.com/story/study-and-interactive-visualization-of-toxicity-in), [thecut.com write-up](https://www.thecut.com/2015/03/interactive-chart-of-reddits-toxicity.html) and supporting [data](https://chart-studio.plotly.com/~bsbell21/210/toxicity-vs-supportiveness-by-subreddit/#data)). Meanwhile, the [r/AskHistorians rules](https://www.reddit.com/r/AskHistorians/wiki/rules) mention that the admins will not tolerate \"_racism, sexism, or any other forms of bigotry_\". 
However, further analysis of whether and to what extent these rules reduce toxicity is still needed.`,\n \"We also note that given the audience of the Reddit website which is more broadly used in the US and Europe, the answers will likely present a Western perspectives, which is particularly important to note when dealing with historical topics.\"\n ]\n },\n limitations: {\n paragraph: [\n \"If studies of the datasets have outlined other limitations of the dataset, such as annotation artifacts, please outline and cite them here.\"\n ],\n example: [\n \"The answers provided in the dataset are represent the opinion of Reddit users. While these communities strive to be helpful, they should not be considered to represent a ground truth.\"\n ]\n },\n datasetCurators: {\n paragraph: [\n \"List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here.\"\n ],\n example: [\n \"The dataset was initially created by Angela Fan, Ethan Perez, Yacine Jernite, Jason Weston, Michael Auli, and David Grangier, during work done at Facebook AI Research (FAIR).\"\n ]\n },\n licensingInformation: {\n paragraph: [\n \"Provide the license and link to the license webpage if available.\"\n ],\n example: [\n \"The licensing status of the dataset hinges on the legal status of the [Pushshift.io](https://files.pushshift.io/reddit/) data which is unclear.\"\n ]\n },\n citationInformation: {\n paragraph: [\n \"Provide the [BibTex](http://www.bibtex.org/)-formatted reference for the dataset. For example:\",\n `\n @article{article_id,\n author = {Author List},\n title = {Dataset Paper Title},\n journal = {Publication Venue},\n year = {2525}\n }\n `,\n \"If the dataset has a [DOI](https://www.doi.org/), please provide it here.\"\n ],\n example: [\n `@inproceedings{eli5_lfqa`,\n `author = {Angela Fan and`,\n `Yacine Jernite and`,\n `Ethan Perez and`,\n `David Grangier and`,\n `Jason Weston and`,\n `Michael Auli},`,\n `editor = {Anna Korhonen and`,\n `David R. 
Traum and`,\n `Llu{\\'{\\i}}s M{\\`{a}}rquez},`,\n `title = {{ELI5:} Long Form Question Answering},`,\n `booktitle = {Proceedings of the 57th Conference of the Association for Computational`,\n `Linguistics, {ACL} 2019, Florence, Italy, July 28- August 2, 2019,`,\n `Volume 1: Long Papers},`,\n `pages = {3558--3567},`,\n `publisher = {Association for Computational Linguistics},`,\n `year = {2019},`,\n `url = {https://doi.org/10.18653/v1/p19-1346},`,\n `doi = {10.18653/v1/p19-1346}`,\n `}`,\n ]\n },\n }\n}","/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/InputField.js",[],"/Users/evrardtserstevens/Documents/HuggingFace/datasetcard/src/Section.js",[],{"ruleId":"36","replacedBy":"37"},{"ruleId":"38","replacedBy":"39"},{"ruleId":"40","severity":1,"message":"41","line":338,"column":71,"nodeType":"42","endLine":338,"endColumn":86},{"ruleId":"40","severity":1,"message":"41","line":346,"column":157,"nodeType":"42","endLine":346,"endColumn":172},{"ruleId":"40","severity":1,"message":"41","line":360,"column":103,"nodeType":"42","endLine":360,"endColumn":118},{"ruleId":"43","severity":1,"message":"44","line":3,"column":1,"nodeType":"45","endLine":317,"endColumn":2},{"ruleId":"46","severity":1,"message":"47","line":303,"column":22,"nodeType":"48","messageId":"49","endLine":303,"endColumn":23,"suggestions":"50"},{"ruleId":"46","severity":1,"message":"51","line":303,"column":25,"nodeType":"48","messageId":"49","endLine":303,"endColumn":26,"suggestions":"52"},"no-native-reassign",["53"],"no-negated-in-lhs",["54"],"react/jsx-no-target-blank","Using target=\"_blank\" without rel=\"noreferrer\" is a security risk: see https://html.spec.whatwg.org/multipage/links.html#link-type-noopener","JSXAttribute","import/no-anonymous-default-export","Assign object to a variable before exporting as module default","ExportDefaultDeclaration","no-useless-escape","Unnecessary escape character: \\'.","TemplateElement","unnecessaryEscape",["55","56"],"Unnecessary escape character: \\i.",["57","58"],"no-global-assign","no-unsafe-negation",{"messageId":"59","fix":"60","desc":"61"},{"messageId":"62","fix":"63","desc":"64"},{"messageId":"59","fix":"65","desc":"61"},{"messageId":"62","fix":"66","desc":"64"},"removeEscape",{"range":"67","text":"68"},"Remove the `\\`. This maintains the current functionality.","escapeBackslash",{"range":"69","text":"70"},"Replace the `\\` with `\\\\` to include the actual backslash character.",{"range":"71","text":"68"},{"range":"72","text":"70"},[26157,26158],"",[26157,26157],"\\",[26160,26161],[26160,26160]]
+ [{"/home/web/card-creator/src/index.js":"1","/home/web/card-creator/src/reportWebVitals.js":"2","/home/web/card-creator/src/App.js":"3","/home/web/card-creator/src/InputField.js":"4","/home/web/card-creator/src/Instructions.js":"5","/home/web/card-creator/src/Section.js":"6"},{"size":500,"mtime":1607352417482,"results":"7","hashOfConfig":"8"},{"size":362,"mtime":1607352417482,"results":"9","hashOfConfig":"8"},{"size":17671,"mtime":1607526113319,"results":"10","hashOfConfig":"8"},{"size":578,"mtime":1607426079320,"results":"11","hashOfConfig":"8"},{"size":26840,"mtime":1607426079320,"results":"12","hashOfConfig":"8"},{"size":634,"mtime":1607526113319,"results":"13","hashOfConfig":"8"},{"filePath":"14","messages":"15","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0},"xdj4q6",{"filePath":"16","messages":"17","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0},{"filePath":"18","messages":"19","errorCount":0,"warningCount":3,"fixableErrorCount":0,"fixableWarningCount":0,"source":null},{"filePath":"20","messages":"21","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0},{"filePath":"22","messages":"23","errorCount":0,"warningCount":3,"fixableErrorCount":0,"fixableWarningCount":0,"source":null},{"filePath":"24","messages":"25","errorCount":0,"warningCount":0,"fixableErrorCount":0,"fixableWarningCount":0},"/home/web/card-creator/src/index.js",[],"/home/web/card-creator/src/reportWebVitals.js",[],"/home/web/card-creator/src/App.js",["26","27","28"],"/home/web/card-creator/src/InputField.js",[],"/home/web/card-creator/src/Instructions.js",["29","30","31"],"/home/web/card-creator/src/Section.js",[],{"ruleId":"32","severity":1,"message":"33","line":338,"column":71,"nodeType":"34","endLine":338,"endColumn":86},{"ruleId":"32","severity":1,"message":"33","line":346,"column":157,"nodeType":"34","endLine":346,"endColumn":172},{"ruleId":"32","severity":1,"message":"33","line":360,"column":103,"nodeType":"34","endLine":360,"endColumn":118},{"ruleId":"35","severity":1,"message":"36","line":3,"column":1,"nodeType":"37","endLine":317,"endColumn":2},{"ruleId":"38","severity":1,"message":"39","line":303,"column":22,"nodeType":"40","messageId":"41","endLine":303,"endColumn":23,"suggestions":"42"},{"ruleId":"38","severity":1,"message":"43","line":303,"column":25,"nodeType":"40","messageId":"41","endLine":303,"endColumn":26,"suggestions":"44"},"react/jsx-no-target-blank","Using target=\"_blank\" without rel=\"noreferrer\" is a security risk: see https://html.spec.whatwg.org/multipage/links.html#link-type-noopener","JSXAttribute","import/no-anonymous-default-export","Assign object to a variable before exporting as module default","ExportDefaultDeclaration","no-useless-escape","Unnecessary escape character: \\'.","TemplateElement","unnecessaryEscape",["45","46"],"Unnecessary escape character: \\i.",["47","48"],{"messageId":"49","fix":"50","desc":"51"},{"messageId":"52","fix":"53","desc":"54"},{"messageId":"49","fix":"55","desc":"51"},{"messageId":"52","fix":"56","desc":"54"},"removeEscape",{"range":"57","text":"58"},"Remove the `\\`. This maintains the current functionality.","escapeBackslash",{"range":"59","text":"60"},"Replace the `\\` with `\\\\` to include the actual backslash character.",{"range":"61","text":"58"},{"range":"62","text":"60"},[26157,26158],"",[26157,26157],"\\",[26160,26161],[26160,26160]]
.gitignore CHANGED
@@ -9,7 +9,7 @@
  /coverage

  # production
- /build
+ # /build

  # misc
  .DS_Store
README.md CHANGED
@@ -1,3 +1,12 @@
+ ---
+ title: Datasets Card Creator
+ emoji: 🏆
+ colorFrom: blue
+ colorTo: blue
+ sdk: static
+ app_file: build/index.html
+ ---
+
  # Getting Started with Create React App

  This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
build/asset-manifest.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "files": {
+     "main.css": "./static/css/main.a2993414.chunk.css",
+     "main.js": "./static/js/main.cbff00b9.chunk.js",
+     "main.js.map": "./static/js/main.cbff00b9.chunk.js.map",
+     "runtime-main.js": "./static/js/runtime-main.73e65ee8.js",
+     "runtime-main.js.map": "./static/js/runtime-main.73e65ee8.js.map",
+     "static/js/2.bc6de3e6.chunk.js": "./static/js/2.bc6de3e6.chunk.js",
+     "static/js/2.bc6de3e6.chunk.js.map": "./static/js/2.bc6de3e6.chunk.js.map",
+     "static/js/3.523cfdab.chunk.js": "./static/js/3.523cfdab.chunk.js",
+     "static/js/3.523cfdab.chunk.js.map": "./static/js/3.523cfdab.chunk.js.map",
+     "index.html": "./index.html",
+     "static/css/main.a2993414.chunk.css.map": "./static/css/main.a2993414.chunk.css.map",
+     "static/js/2.bc6de3e6.chunk.js.LICENSE.txt": "./static/js/2.bc6de3e6.chunk.js.LICENSE.txt"
+   },
+   "entrypoints": [
+     "static/js/runtime-main.73e65ee8.js",
+     "static/js/2.bc6de3e6.chunk.js",
+     "static/css/main.a2993414.chunk.css",
+     "static/js/main.cbff00b9.chunk.js"
+   ]
+ }
build/favicon.ico ADDED
build/index.html ADDED
@@ -0,0 +1 @@
+ <!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Web site created using create-react-app"/><link rel="apple-touch-icon" href="./logo192.png"/><link rel="manifest" href="./manifest.json"/><title>React App</title><link href="./static/css/main.a2993414.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function t(t){for(var n,u,i=t[0],c=t[1],l=t[2],s=0,p=[];s<i.length;s++)u=i[s],Object.prototype.hasOwnProperty.call(o,u)&&o[u]&&p.push(o[u][0]),o[u]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(t);p.length;)p.shift()();return a.push.apply(a,l||[]),r()}function r(){for(var e,t=0;t<a.length;t++){for(var r=a[t],n=!0,i=1;i<r.length;i++){var c=r[i];0!==o[c]&&(n=!1)}n&&(a.splice(t--,1),e=u(u.s=r[0]))}return e}var n={},o={1:0},a=[];function u(t){if(n[t])return n[t].exports;var r=n[t]={i:t,l:!1,exports:{}};return e[t].call(r.exports,r,r.exports,u),r.l=!0,r.exports}u.e=function(e){var t=[],r=o[e];if(0!==r)if(r)t.push(r[2]);else{var n=new Promise((function(t,n){r=o[e]=[t,n]}));t.push(r[2]=n);var a,i=document.createElement("script");i.charset="utf-8",i.timeout=120,u.nc&&i.setAttribute("nonce",u.nc),i.src=function(e){return u.p+"static/js/"+({}[e]||e)+"."+{3:"523cfdab"}[e]+".chunk.js"}(e);var c=new Error;a=function(t){i.onerror=i.onload=null,clearTimeout(l);var r=o[e];if(0!==r){if(r){var n=t&&("load"===t.type?"missing":t.type),a=t&&t.target&&t.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+a+")",c.name="ChunkLoadError",c.type=n,c.request=a,r[1](c)}o[e]=void 0}};var l=setTimeout((function(){a({type:"timeout",target:i})}),12e4);i.onerror=i.onload=a,document.head.appendChild(i)}return Promise.all(t)},u.m=e,u.c=n,u.d=function(e,t,r){u.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},u.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},u.t=function(e,t){if(1&t&&(e=u(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(u.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)u.d(r,n,function(t){return e[t]}.bind(null,n));return r},u.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return u.d(t,"a",t),t},u.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},u.p="./",u.oe=function(e){throw console.error(e),e};var i=this.webpackJsonpdatasetcard=this.webpackJsonpdatasetcard||[],c=i.push.bind(i);i.push=t,i=i.slice();for(var l=0;l<i.length;l++)t(i[l]);var f=c;r()}([])</script><script src="./static/js/2.bc6de3e6.chunk.js"></script><script src="./static/js/main.cbff00b9.chunk.js"></script></body></html>
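The inlined script above is the webpack bootstrap runtime: chunks register themselves by pushing onto the global `webpackJsonpdatasetcard` array, and chunk 3 is loaded on demand by injecting a `<script>` tag built from the relative `./` public path, the same relative URL scheme used throughout `asset-manifest.json`. That relative scheme is what lets the prebuilt bundle be served from a subpath such as a Space URL; with Create React App it typically comes from a `homepage` entry in `package.json`, which is not part of this diff. A minimal sketch of the lazy-loading pattern, simplified from the minified runtime:

```js
// Simplified sketch of the JSONP-style lazy chunk loading performed by the
// minified runtime inlined in build/index.html. The real runtime also tracks
// installed chunks, reuses in-flight requests, and fails with a
// ChunkLoadError after a 120s timeout.
window.webpackJsonpdatasetcard = window.webpackJsonpdatasetcard || [];

function loadChunk(chunkId, hash) {
  return new Promise((resolve, reject) => {
    const script = document.createElement('script');
    // Relative "./" public path keeps the build relocatable under any subpath.
    script.src = './static/js/' + chunkId + '.' + hash + '.chunk.js';
    script.onload = () => resolve();
    script.onerror = () => reject(new Error('Loading chunk ' + chunkId + ' failed.'));
    document.head.appendChild(script);
  });
}

// Here, loadChunk(3, '523cfdab') would fetch ./static/js/3.523cfdab.chunk.js.
```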
build/logo192.png ADDED
build/logo512.png ADDED
build/manifest.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "short_name": "React App",
+   "name": "Create React App Sample",
+   "icons": [
+     {
+       "src": "favicon.ico",
+       "sizes": "64x64 32x32 24x24 16x16",
+       "type": "image/x-icon"
+     },
+     {
+       "src": "logo192.png",
+       "type": "image/png",
+       "sizes": "192x192"
+     },
+     {
+       "src": "logo512.png",
+       "type": "image/png",
+       "sizes": "512x512"
+     }
+   ],
+   "start_url": ".",
+   "display": "standalone",
+   "theme_color": "#000000",
+   "background_color": "#ffffff"
+ }
build/robots.txt ADDED
@@ -0,0 +1,3 @@
+ # https://www.robotstxt.org/robotstxt.html
+ User-agent: *
+ Disallow:
build/static/css/main.a2993414.chunk.css ADDED
@@ -0,0 +1,2 @@
+ .space-y-8>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(2rem*(1 - var(--tw-space-y-reverse)));margin-bottom:calc(2rem*var(--tw-space-y-reverse))}.divide-y-2>:not([hidden])~:not([hidden]){--tw-divide-y-reverse:0;border-top-width:calc(2px*(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(2px*var(--tw-divide-y-reverse))}.divide-y>:not([hidden])~:not([hidden]){--tw-divide-y-reverse:0;border-top-width:calc(1px*(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(1px*var(--tw-divide-y-reverse))}.divide-gray-200>:not([hidden])~:not([hidden]){--tw-divide-opacity:1;border-color:rgba(237,242,247,var(--tw-divide-opacity))}.bg-white{--tw-bg-opacity:1;background-color:rgba(255,255,255,var(--tw-bg-opacity))}.bg-gray-100{--tw-bg-opacity:1;background-color:rgba(247,250,252,var(--tw-bg-opacity))}.border-gray-200{--tw-border-opacity:1;border-color:rgba(237,242,247,var(--tw-border-opacity))}.border-gray-300{--tw-border-opacity:1;border-color:rgba(226,232,240,var(--tw-border-opacity))}.rounded-md{border-radius:.375rem}.rounded-lg{border-radius:.5rem}.border-solid{border-style:solid}.border-none{border-style:none}.border{border-width:1px}.cursor-pointer{cursor:pointer}.block{display:block}.inline-block{display:inline-block}.flex{display:flex}.inline-flex{display:inline-flex}.table{display:table}.items-center{align-items:center}.justify-end{justify-content:flex-end}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.font-sans{font-family:system-ui,-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji"}.font-normal{font-weight:400}.font-medium{font-weight:500}.font-extrabold{font-weight:800}.h-10{height:2.5rem}.h-screen{height:100vh}.text-xs{font-size:.75rem}.text-base{font-size:1rem}.text-lg{font-size:1.125rem}.text-xl{font-size:1.25rem}.text-4xl{font-size:2.25rem}.leading-4{line-height:1rem}.mx-auto{margin-left:auto;margin-right:auto}.mt-1{margin-top:.25rem}.ml-1{margin-left:.25rem}.mt-2{margin-top:.5rem}.ml-2{margin-left:.5rem}.mt-4{margin-top:1rem}.mr-4{margin-right:1rem}.ml-4{margin-left:1rem}.mt-5{margin-top:1.25rem}.mt-12{margin-top:3rem}.mb-32{margin-bottom:8rem}.max-h-screen{max-height:100vh}.max-w-xl{max-width:36rem}.max-w-7xl{max-width:80rem}.min-h-full{min-height:100%}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}.overflow-hidden{overflow:hidden}.overflow-y-auto{overflow-y:auto}.p-2{padding:.5rem}.p-6{padding:1.5rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.py-4{padding-top:1rem;padding-bottom:1rem}.px-4{padding-left:1rem;padding-right:1rem}.py-8{padding-top:2rem;padding-bottom:2rem}.py-12{padding-top:3rem;padding-bottom:3rem}.pt-6{padding-top:1.5rem}.absolute{position:absolute}.bottom-0{bottom:0}.left-0{left:0}*{--tw-shadow:0 0 transparent}.shadow-sm{--tw-shadow:0 1px 2px 0 rgba(0,0,0,0.05)}.shadow,.shadow-sm{box-shadow:var(--tw-ring-offset-shadow,0 0 transparent),var(--tw-ring-shadow,0 0 transparent),var(--tw-shadow)}.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px 0 rgba(0,0,0,0.06)}*{--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(66,153,225,0.5);--tw-ring-offset-shadow:0 0 transparent;--tw-ring-shadow:0 0 transparent}.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) 
var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 transparent)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px}.focus\:ring-gray-500:focus{--tw-ring-opacity:1;--tw-ring-color:rgba(160,174,192,var(--tw-ring-opacity))}.text-left{text-align:left}.text-center{text-align:center}.text-gray-500{--tw-text-opacity:1;color:rgba(160,174,192,var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity:1;color:rgba(113,128,150,var(--tw-text-opacity))}.text-gray-700{--tw-text-opacity:1;color:rgba(74,85,104,var(--tw-text-opacity))}.no-underline{text-decoration:none}.w-80{width:20rem}.w-full{width:100%}.gap-6{gap:1.5rem}.grid-cols-12{grid-template-columns:repeat(12,minmax(0,1fr))}.col-span-4{grid-column:span 4/span 4}.col-span-8{grid-column:span 8/span 8}@-webkit-keyframes spin{to{transform:rotate(1turn)}}@keyframes spin{to{transform:rotate(1turn)}}@-webkit-keyframes ping{75%,to{transform:scale(2);opacity:0}}@keyframes ping{75%,to{transform:scale(2);opacity:0}}@-webkit-keyframes pulse{50%{opacity:.5}}@keyframes pulse{50%{opacity:.5}}@-webkit-keyframes bounce{0%,to{transform:translateY(-25%);-webkit-animation-timing-function:cubic-bezier(.8,0,1,1);animation-timing-function:cubic-bezier(.8,0,1,1)}50%{transform:none;-webkit-animation-timing-function:cubic-bezier(0,0,.2,1);animation-timing-function:cubic-bezier(0,0,.2,1)}}@keyframes bounce{0%,to{transform:translateY(-25%);-webkit-animation-timing-function:cubic-bezier(.8,0,1,1);animation-timing-function:cubic-bezier(.8,0,1,1)}50%{transform:none;-webkit-animation-timing-function:cubic-bezier(0,0,.2,1);animation-timing-function:cubic-bezier(0,0,.2,1)}}.grid{display:grid}.col-span-4{grid-column-start:span 4}.col-span-8{grid-column-start:span 8}@media (min-width:500px){.xs\:max-w-xs{max-width:20rem}}@media (min-width:640px){.sm\:text-sm{font-size:.875rem}.sm\:px-6{padding-left:1.5rem;padding-right:1.5rem}.sm\:py-12{padding-top:3rem;padding-bottom:3rem}.sm\:tracking-tight{letter-spacing:-.025em}}@media (min-width:768px){.md\:max-h-xs{max-height:20rem}.md\:max-w-2xl{max-width:42rem}}@media (min-width:1024px){.lg\:max-h-md{max-height:28rem}.lg\:px-8{padding-left:2rem;padding-right:2rem}.lg\:py-16{padding-top:4rem;padding-bottom:4rem}}@media (min-width:1280px){.xl\:max-h-xl{max-height:36rem}.xl\:max-w-4xl{max-width:56rem}}@media (min-width:1650px){.xxl\:max-h-screen{max-height:100vh}.xxl\:min-w-5xl{min-width:64rem}}
+ /*# sourceMappingURL=main.a2993414.chunk.css.map */
build/static/css/main.a2993414.chunk.css.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["webpack://src/index.css","<no source>","main.a2993414.chunk.css"],"names":[],"mappings":"AAGA,yCAAA,sBAAmB,CAAnB,qDAAmB,CAAnB,kDAAmB,CAAnB,0CAAA,uBAAmB,CAAnB,2DAAmB,CAAnB,wDAAmB,CAAnB,wCAAA,uBAAmB,CAAnB,2DAAmB,CAAnB,wDAAmB,CAAnB,+CAAA,qBAAmB,CAAnB,uDAAmB,CAAnB,UAAA,iBAAmB,CAAnB,uDAAmB,CAAnB,aAAA,iBAAmB,CAAnB,uDAAmB,CAAnB,iBAAA,qBAAmB,CAAnB,uDAAmB,CAAnB,iBAAA,qBAAmB,CAAnB,uDAAmB,CAAnB,YAAA,qBAAmB,CAAnB,YAAA,mBAAmB,CAAnB,cAAA,kBAAmB,CAAnB,aAAA,iBAAmB,CAAnB,QAAA,gBAAmB,CAAnB,gBAAA,cAAmB,CAAnB,OAAA,aAAmB,CAAnB,cAAA,oBAAmB,CAAnB,MAAA,YAAmB,CAAnB,aAAA,mBAAmB,CAAnB,OAAA,aAAmB,CAAnB,cAAA,kBAAmB,CAAnB,aAAA,wBAAmB,CAAnB,gBAAA,sBAAmB,CAAnB,iBAAA,6BAAmB,CAAnB,WAAA,gMAAmB,CAAnB,aAAA,eAAmB,CAAnB,aAAA,eAAmB,CAAnB,gBAAA,eAAmB,CAAnB,MAAA,aAAmB,CAAnB,UAAA,YAAmB,CAAnB,SAAA,gBAAmB,CAAnB,WAAA,cAAmB,CAAnB,SAAA,kBAAmB,CAAnB,SAAA,iBAAmB,CAAnB,UAAA,iBAAmB,CAAnB,WAAA,gBAAmB,CAAnB,SAAA,gBAAmB,CAAnB,iBAAmB,CAAnB,MAAA,iBAAmB,CAAnB,MAAA,kBAAmB,CAAnB,MAAA,gBAAmB,CAAnB,MAAA,iBAAmB,CAAnB,MAAA,eAAmB,CAAnB,MAAA,iBAAmB,CAAnB,MAAA,gBAAmB,CAAnB,MAAA,kBAAmB,CAAnB,OAAA,eAAmB,CAAnB,OAAA,kBAAmB,CAAnB,cAAA,gBAAmB,CAAnB,UAAA,eAAmB,CAAnB,WAAA,eAAmB,CAAnB,YAAA,eAAmB,CAAnB,2BAAA,6BAAmB,CAAnB,kBAAmB,CAAnB,iBAAA,eAAmB,CAAnB,iBAAA,eAAmB,CAAnB,KAAA,aAAmB,CAAnB,KAAA,cAAmB,CAAnB,MAAA,iBAAmB,CAAnB,oBAAmB,CAAnB,MAAA,mBAAmB,CAAnB,oBAAmB,CAAnB,MAAA,gBAAmB,CAAnB,mBAAmB,CAAnB,MAAA,iBAAmB,CAAnB,kBAAmB,CAAnB,MAAA,gBAAmB,CAAnB,mBAAmB,CAAnB,OAAA,gBAAmB,CAAnB,mBAAmB,CAAnB,MAAA,kBAAmB,CAAnB,UAAA,iBAAmB,CAAnB,UAAA,QAAmB,CAAnB,QAAA,MAAmB,CAAnB,EAAA,2BAAmB,CAAnB,WAAA,wCAAmB,CAAnB,mBAAA,8GAAmB,CAAnB,QAAA,oEAAmB,CAAnB,EAAA,2CAAmB,CAAnB,0BAAmB,CAAnB,2BAAmB,CAAnB,oCAAmB,CAAnB,uCAAmB,CAAnB,gCAAmB,CAAnB,qBAAA,0GAAmB,CAAnB,wGAAmB,CAAnB,8FAAmB,CAAnB,4BAAA,0BAAmB,CAAnB,4BAAA,mBAAmB,CAAnB,wDAAmB,CAAnB,WAAA,eAAmB,CAAnB,aAAA,iBAAmB,CAAnB,eAAA,mBAAmB,CAAnB,8CAAmB,CAAnB,eAAA,mBAAmB,CAAnB,8CAAmB,CAAnB,eAAA,mBAAmB,CAAnB,4CAAmB,CAAnB,cAAA,oBAAmB,CAAnB,MAAA,WAAmB,CAAnB,QAAA,UAAmB,CAAnB,OAAA,UAAmB,CAAnB,cAAA,8CAAmB,CAAnB,YAAA,yBAAmB,CAAnB,YAAA,yBAAmB,CAAnB,wBAAA,GAAA,uBAAmB,CAAA,CAAnB,gBAAA,GAAA,uBAAmB,CAAA,CAAnB,wBAAA,OAAA,kBAAmB,CAAnB,SAAmB,CAAA,CAAnB,gBAAA,OAAA,kBAAmB,CAAnB,SAAmB,CAAA,CAAnB,yBAAA,IAAA,UAAmB,CAAA,CAAnB,iBAAA,IAAA,UAAmB,CAAA,CAAnB,0BAAA,MAAA,0BAAmB,CAAnB,wDAAmB,CAAnB,gDAAmB,CAAnB,IAAA,cAAmB,CAAnB,wDAAmB,CAAnB,gDAAmB,CAAA,CAAnB,kBAAA,MAAA,0BAAmB,CAAnB,wDAAmB,CAAnB,gDAAmB,CAAnB,IAAA,cAAmB,CAAnB,wDAAmB,CAAnB,gDAAmB,CAAA,CAAnB,MAAA,YAAmB,CAAnB,YAAA,wBAAmB,CAAnB,YAAA,wBAAmB,CCHnB,yBDGA,cAAA,eAAmB,CEqcnB,CDxcA,yBDGA,aAAA,iBAAmB,CAAnB,UAAA,mBAAmB,CAAnB,oBAAmB,CAAnB,WAAA,gBAAmB,CAAnB,mBAAmB,CAAnB,oBAAA,sBAAmB,CEydnB,CD5dA,yBDGA,cAAA,gBAAmB,CAAnB,eAAA,eAAmB,CEmenB,CDteA,0BDGA,cAAA,gBAAmB,CAAnB,UAAA,iBAAmB,CAAnB,kBAAmB,CAAnB,WAAA,gBAAmB,CAAnB,mBAAmB,CEmfnB,CDtfA,0BDGA,cAAA,gBAAmB,CAAnB,eAAA,eAAmB,CE6fnB,CDhgBA,0BDGA,mBAAA,gBAAmB,CAAnB,gBAAA,eAAmB,CEugBnB","file":"main.a2993414.chunk.css","sourcesContent":["/* ./src/index.css */\n@tailwind base;\n@tailwind components;\n@tailwind utilities;",null,"/* ./src/index.css */\n\n.space-y-8 > :not([hidden]) ~ :not([hidden]) {\n --tw-space-y-reverse: 0;\n margin-top: calc(2rem * calc(1 - var(--tw-space-y-reverse)));\n margin-bottom: calc(2rem * var(--tw-space-y-reverse))\n}\n\n.divide-y-2 > :not([hidden]) ~ :not([hidden]) {\n --tw-divide-y-reverse: 0;\n border-top-width: calc(2px * calc(1 - var(--tw-divide-y-reverse)));\n border-bottom-width: calc(2px * var(--tw-divide-y-reverse))\n}\n\n.divide-y > :not([hidden]) ~ :not([hidden]) {\n --tw-divide-y-reverse: 0;\n border-top-width: calc(1px * calc(1 - 
var(--tw-divide-y-reverse)));\n border-bottom-width: calc(1px * var(--tw-divide-y-reverse))\n}\n\n.divide-gray-200 > :not([hidden]) ~ :not([hidden]) {\n --tw-divide-opacity: 1;\n border-color: rgba(237, 242, 247, var(--tw-divide-opacity))\n}\n\n.bg-white {\n --tw-bg-opacity: 1;\n background-color: rgba(255, 255, 255, var(--tw-bg-opacity))\n}\n\n.bg-gray-100 {\n --tw-bg-opacity: 1;\n background-color: rgba(247, 250, 252, var(--tw-bg-opacity))\n}\n\n.border-gray-200 {\n --tw-border-opacity: 1;\n border-color: rgba(237, 242, 247, var(--tw-border-opacity))\n}\n\n.border-gray-300 {\n --tw-border-opacity: 1;\n border-color: rgba(226, 232, 240, var(--tw-border-opacity))\n}\n\n.rounded-md {\n border-radius: 0.375rem\n}\n\n.rounded-lg {\n border-radius: 0.5rem\n}\n\n.border-solid {\n border-style: solid\n}\n\n.border-none {\n border-style: none\n}\n\n.border {\n border-width: 1px\n}\n\n.cursor-pointer {\n cursor: pointer\n}\n\n.block {\n display: block\n}\n\n.inline-block {\n display: inline-block\n}\n\n.flex {\n display: flex\n}\n\n.inline-flex {\n display: inline-flex\n}\n\n.table {\n display: table\n}\n\n.grid {\n display: grid\n}\n\n.items-center {\n align-items: center\n}\n\n.justify-end {\n justify-content: flex-end\n}\n\n.justify-center {\n justify-content: center\n}\n\n.justify-between {\n justify-content: space-between\n}\n\n.font-sans {\n font-family: system-ui, -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, \"Helvetica Neue\", Arial, \"Noto Sans\", sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\", \"Noto Color Emoji\"\n}\n\n.font-normal {\n font-weight: 400\n}\n\n.font-medium {\n font-weight: 500\n}\n\n.font-extrabold {\n font-weight: 800\n}\n\n.h-10 {\n height: 2.5rem\n}\n\n.h-screen {\n height: 100vh\n}\n\n.text-xs {\n font-size: 0.75rem\n}\n\n.text-base {\n font-size: 1rem\n}\n\n.text-lg {\n font-size: 1.125rem\n}\n\n.text-xl {\n font-size: 1.25rem\n}\n\n.text-4xl {\n font-size: 2.25rem\n}\n\n.leading-4 {\n line-height: 1rem\n}\n\n.mx-auto {\n margin-left: auto;\n margin-right: auto\n}\n\n.mt-1 {\n margin-top: 0.25rem\n}\n\n.ml-1 {\n margin-left: 0.25rem\n}\n\n.mt-2 {\n margin-top: 0.5rem\n}\n\n.ml-2 {\n margin-left: 0.5rem\n}\n\n.mt-4 {\n margin-top: 1rem\n}\n\n.mr-4 {\n margin-right: 1rem\n}\n\n.ml-4 {\n margin-left: 1rem\n}\n\n.mt-5 {\n margin-top: 1.25rem\n}\n\n.mt-12 {\n margin-top: 3rem\n}\n\n.mb-32 {\n margin-bottom: 8rem\n}\n\n.max-h-screen {\n max-height: 100vh\n}\n\n.max-w-xl {\n max-width: 36rem\n}\n\n.max-w-7xl {\n max-width: 80rem\n}\n\n.min-h-full {\n min-height: 100%\n}\n\n.focus\\:outline-none:focus {\n outline: 2px solid transparent;\n outline-offset: 2px\n}\n\n.overflow-hidden {\n overflow: hidden\n}\n\n.overflow-y-auto {\n overflow-y: auto\n}\n\n.p-2 {\n padding: 0.5rem\n}\n\n.p-6 {\n padding: 1.5rem\n}\n\n.py-2 {\n padding-top: 0.5rem;\n padding-bottom: 0.5rem\n}\n\n.px-3 {\n padding-left: 0.75rem;\n padding-right: 0.75rem\n}\n\n.py-4 {\n padding-top: 1rem;\n padding-bottom: 1rem\n}\n\n.px-4 {\n padding-left: 1rem;\n padding-right: 1rem\n}\n\n.py-8 {\n padding-top: 2rem;\n padding-bottom: 2rem\n}\n\n.py-12 {\n padding-top: 3rem;\n padding-bottom: 3rem\n}\n\n.pt-6 {\n padding-top: 1.5rem\n}\n\n.absolute {\n position: absolute\n}\n\n.bottom-0 {\n bottom: 0\n}\n\n.left-0 {\n left: 0\n}\n\n* {\n --tw-shadow: 0 0 #0000\n}\n\n.shadow-sm {\n --tw-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);\n box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow)\n}\n\n.shadow {\n --tw-shadow: 0 1px 3px 0 
rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06);\n box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow)\n}\n\n* {\n --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/);\n --tw-ring-offset-width: 0px;\n --tw-ring-offset-color: #fff;\n --tw-ring-color: rgba(66, 153, 225, 0.5);\n --tw-ring-offset-shadow: 0 0 #0000;\n --tw-ring-shadow: 0 0 #0000\n}\n\n.focus\\:ring-2:focus {\n --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);\n --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);\n box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000)\n}\n\n.focus\\:ring-offset-2:focus {\n --tw-ring-offset-width: 2px\n}\n\n.focus\\:ring-gray-500:focus {\n --tw-ring-opacity: 1;\n --tw-ring-color: rgba(160, 174, 192, var(--tw-ring-opacity))\n}\n\n.text-left {\n text-align: left\n}\n\n.text-center {\n text-align: center\n}\n\n.text-gray-500 {\n --tw-text-opacity: 1;\n color: rgba(160, 174, 192, var(--tw-text-opacity))\n}\n\n.text-gray-600 {\n --tw-text-opacity: 1;\n color: rgba(113, 128, 150, var(--tw-text-opacity))\n}\n\n.text-gray-700 {\n --tw-text-opacity: 1;\n color: rgba(74, 85, 104, var(--tw-text-opacity))\n}\n\n.no-underline {\n text-decoration: none\n}\n\n.w-80 {\n width: 20rem\n}\n\n.w-full {\n width: 100%\n}\n\n.gap-6 {\n gap: 1.5rem\n}\n\n.grid-cols-12 {\n grid-template-columns: repeat(12, minmax(0, 1fr))\n}\n\n.col-span-4 {\n grid-column: span 4 / span 4\n}\n\n.col-span-8 {\n grid-column: span 8 / span 8\n}\n\n@-webkit-keyframes spin {\n to {\n transform: rotate(360deg)\n }\n}\n\n@keyframes spin {\n to {\n transform: rotate(360deg)\n }\n}\n\n@-webkit-keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0\n }\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0\n }\n}\n\n@-webkit-keyframes pulse {\n 50% {\n opacity: .5\n }\n}\n\n@keyframes pulse {\n 50% {\n opacity: .5\n }\n}\n\n@-webkit-keyframes bounce {\n 0%, 100% {\n transform: translateY(-25%);\n -webkit-animation-timing-function: cubic-bezier(0.8,0,1,1);\n animation-timing-function: cubic-bezier(0.8,0,1,1)\n }\n\n 50% {\n transform: none;\n -webkit-animation-timing-function: cubic-bezier(0,0,0.2,1);\n animation-timing-function: cubic-bezier(0,0,0.2,1)\n }\n}\n\n@keyframes bounce {\n 0%, 100% {\n transform: translateY(-25%);\n -webkit-animation-timing-function: cubic-bezier(0.8,0,1,1);\n animation-timing-function: cubic-bezier(0.8,0,1,1)\n }\n\n 50% {\n transform: none;\n -webkit-animation-timing-function: cubic-bezier(0,0,0.2,1);\n animation-timing-function: cubic-bezier(0,0,0.2,1)\n }\n}\n\n.grid {\n display: grid\n}\n\n.col-span-4 {\n grid-column-start: span 4\n}\n\n.col-span-8 {\n grid-column-start: span 8\n}\n\n@media (min-width: 500px) {\n .xs\\:max-w-xs {\n max-width: 20rem\n }\n}\n\n@media (min-width: 640px) {\n .sm\\:text-sm {\n font-size: 0.875rem\n }\n\n .sm\\:px-6 {\n padding-left: 1.5rem;\n padding-right: 1.5rem\n }\n\n .sm\\:py-12 {\n padding-top: 3rem;\n padding-bottom: 3rem\n }\n\n .sm\\:tracking-tight {\n letter-spacing: -0.025em\n }\n}\n\n@media (min-width: 768px) {\n .md\\:max-h-xs {\n max-height: 20rem\n }\n\n .md\\:max-w-2xl {\n max-width: 42rem\n }\n}\n\n@media (min-width: 1024px) {\n .lg\\:max-h-md {\n max-height: 28rem\n }\n\n .lg\\:px-8 {\n padding-left: 2rem;\n padding-right: 2rem\n }\n\n .lg\\:py-16 {\n padding-top: 4rem;\n padding-bottom: 4rem\n }\n}\n\n@media (min-width: 1280px) {\n 
.xl\\:max-h-xl {\n max-height: 36rem\n }\n\n .xl\\:max-w-4xl {\n max-width: 56rem\n }\n}\n\n@media (min-width: 1650px) {\n .xxl\\:max-h-screen {\n max-height: 100vh\n }\n\n .xxl\\:min-w-5xl {\n min-width: 64rem\n }\n}\n"]}
build/static/js/2.bc6de3e6.chunk.js ADDED
The diff for this file is too large to render. See raw diff
 
build/static/js/2.bc6de3e6.chunk.js.LICENSE.txt ADDED
@@ -0,0 +1,57 @@
+ /*
+ object-assign
+ (c) Sindre Sorhus
+ @license MIT
+ */
+
+ /*!
+ * Determine if an object is a Buffer
+ *
+ * @author Feross Aboukhadijeh <https://feross.org>
+ * @license MIT
+ */
+
+ /** @license React v0.20.1
+ * scheduler.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+ /** @license React v16.13.1
+ * react-is.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+ /** @license React v17.0.1
+ * react-dom.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+ /** @license React v17.0.1
+ * react-jsx-runtime.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+ /** @license React v17.0.1
+ * react.production.min.js
+ *
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
build/static/js/2.bc6de3e6.chunk.js.map ADDED
The diff for this file is too large to render. See raw diff
 
build/static/js/3.523cfdab.chunk.js ADDED
@@ -0,0 +1,2 @@
+ (this.webpackJsonpdatasetcard=this.webpackJsonpdatasetcard||[]).push([[3],{140:function(t,n,e){"use strict";e.r(n),e.d(n,"getCLS",(function(){return v})),e.d(n,"getFCP",(function(){return g})),e.d(n,"getFID",(function(){return h})),e.d(n,"getLCP",(function(){return y})),e.d(n,"getTTFB",(function(){return F}));var i,a,r=function(){return"".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)},o=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:r(),isFinal:!1}},u=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},s=!1,c=!1,d=function(t){s=!t.persisted},f=function(){addEventListener("pagehide",d),addEventListener("beforeunload",(function(){}))},p=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];c||(f(),c=!0),addEventListener("visibilitychange",(function(n){var e=n.timeStamp;"hidden"===document.visibilityState&&t({timeStamp:e,isUnloading:s})}),{capture:!0,once:n})},l=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||"hidden"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},v=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("CLS",0),a=function(t){t.hadRecentInput||(i.value+=t.value,i.entries.push(t),n())},r=u("layout-shift",a);r&&(n=l(t,i,r,e),p((function(t){var e=t.isUnloading;r.takeRecords().map(a),e&&(i.isFinal=!0),n()})))},m=function(){return void 0===i&&(i="hidden"===document.visibilityState?0:1/0,p((function(t){var n=t.timeStamp;return i=n}),!0)),{get timeStamp(){return i}}},g=function(t){var n,e=o("FCP"),i=m(),a=u("paint",(function(t){"first-contentful-paint"===t.name&&t.startTime<i.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));a&&(n=l(t,e,a))},h=function(t){var n=o("FID"),e=m(),i=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,r())},a=u("first-input",i),r=l(t,n,a);a?p((function(){a.takeRecords().map(i),a.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],r())}))},S=function(){return a||(a=new Promise((function(t){return["scroll","keydown","pointerdown"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),a},y=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("LCP"),a=m(),r=function(t){var e=t.startTime;e<a.timeStamp?(i.value=e,i.entries.push(t)):i.isFinal=!0,n()},s=u("largest-contentful-paint",r);if(s){n=l(t,i,s,e);var c=function(){i.isFinal||(s.takeRecords().map(r),i.isFinal=!0,n())};S().then(c),p(c,!0)}},F=function(t){var n,e=o("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var t=performance.timing,n={entryType:"navigation",startTime:0};for(var e in t)"navigationStart"!==e&&"toJSON"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);
+ //# sourceMappingURL=3.523cfdab.chunk.js.map
build/static/js/3.523cfdab.chunk.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../node_modules/web-vitals/dist/web-vitals.es5.min.js"],"names":["v","t","n","e","concat","Date","now","Math","floor","random","i","arguments","length","name","value","delta","entries","id","isFinal","a","PerformanceObserver","supportedEntryTypes","includes","getEntries","map","observe","type","buffered","r","o","s","persisted","u","addEventListener","c","timeStamp","document","visibilityState","isUnloading","capture","once","l","disconnect","p","hadRecentInput","push","takeRecords","d","startTime","f","processingStart","window","perfMetrics","onFirstInputDelay","entryType","target","cancelable","m","Promise","passive","g","then","h","performance","getEntriesByType","timing","max","navigationStart","responseStart","readyState","setTimeout"],"mappings":"4GAAA,gFAAAA,KAAA,0HAAIC,EAAEC,EAAEC,EAAE,WAAW,MAAM,GAAGC,OAAOC,KAAKC,MAAM,KAAKF,OAAOG,KAAKC,MAAM,cAAcD,KAAKE,UAAU,OAAOC,EAAE,SAAST,GAAG,IAAIC,EAAES,UAAUC,OAAO,QAAG,IAASD,UAAU,GAAGA,UAAU,IAAI,EAAE,MAAM,CAACE,KAAKZ,EAAEa,MAAMZ,EAAEa,MAAM,EAAEC,QAAQ,GAAGC,GAAGd,IAAIe,SAAQ,IAAKC,EAAE,SAASlB,EAAEC,GAAG,IAAI,GAAGkB,oBAAoBC,oBAAoBC,SAASrB,GAAG,CAAC,IAAIE,EAAE,IAAIiB,qBAAqB,SAASnB,GAAG,OAAOA,EAAEsB,aAAaC,IAAItB,MAAM,OAAOC,EAAEsB,QAAQ,CAACC,KAAKzB,EAAE0B,UAAS,IAAKxB,GAAG,MAAMF,MAAM2B,GAAE,EAAGC,GAAE,EAAGC,EAAE,SAAS7B,GAAG2B,GAAG3B,EAAE8B,WAAWC,EAAE,WAAWC,iBAAiB,WAAWH,GAAGG,iBAAiB,gBAAgB,gBAAgBC,EAAE,SAASjC,GAAG,IAAIC,EAAES,UAAUC,OAAO,QAAG,IAASD,UAAU,IAAIA,UAAU,GAAGkB,IAAIG,IAAIH,GAAE,GAAII,iBAAiB,oBAAoB,SAAS/B,GAAG,IAAIC,EAAED,EAAEiC,UAAU,WAAWC,SAASC,iBAAiBpC,EAAE,CAACkC,UAAUhC,EAAEmC,YAAYV,MAAM,CAACW,SAAQ,EAAGC,KAAKtC,KAAKuC,EAAE,SAASxC,EAAEC,EAAEC,EAAEO,GAAG,IAAIS,EAAE,OAAO,WAAWhB,GAAGD,EAAEgB,SAASf,EAAEuC,aAAaxC,EAAEY,OAAO,IAAIJ,GAAGR,EAAEgB,SAAS,WAAWkB,SAASC,mBAAmBnC,EAAEa,MAAMb,EAAEY,OAAOK,GAAG,IAAIjB,EAAEa,OAAOb,EAAEgB,cAAS,IAASC,KAAKlB,EAAEC,GAAGiB,EAAEjB,EAAEY,UAAU6B,EAAE,SAAS1C,GAAG,IAAIC,EAAEC,EAAEQ,UAAUC,OAAO,QAAG,IAASD,UAAU,IAAIA,UAAU,GAAGiB,EAAElB,EAAE,MAAM,GAAGmB,EAAE,SAAS5B,GAAGA,EAAE2C,iBAAiBhB,EAAEd,OAAOb,EAAEa,MAAMc,EAAEZ,QAAQ6B,KAAK5C,GAAGC,MAAM4B,EAAEX,EAAE,eAAeU,GAAGC,IAAI5B,EAAEuC,EAAExC,EAAE2B,EAAEE,EAAE3B,GAAG+B,GAAG,SAASjC,GAAG,IAAIE,EAAEF,EAAEqC,YAAYR,EAAEgB,cAActB,IAAIK,GAAG1B,IAAIyB,EAAEV,SAAQ,GAAIhB,SAAS6C,EAAE,WAAW,YAAO,IAAS9C,IAAIA,EAAE,WAAWmC,SAASC,gBAAgB,EAAE,IAAIH,GAAG,SAAShC,GAAG,IAAIC,EAAED,EAAEiC,UAAU,OAAOlC,EAAEE,KAAI,IAAK,CAAC,gBAAgB,OAAOF,KAAKD,EAAE,SAASC,GAAG,IAAIC,EAAEC,EAAEO,EAAE,OAAOkB,EAAEmB,IAAIlB,EAAEV,EAAE,SAAS,SAASlB,GAAG,2BAA2BA,EAAEY,MAAMZ,EAAE+C,UAAUpB,EAAEO,YAAYhC,EAAEW,MAAMb,EAAE+C,UAAU7C,EAAEe,SAAQ,EAAGf,EAAEa,QAAQ6B,KAAK5C,GAAGC,QAAQ2B,IAAI3B,EAAEuC,EAAExC,EAAEE,EAAE0B,KAAKoB,EAAE,SAAShD,GAAG,IAAIC,EAAEQ,EAAE,OAAOP,EAAE4C,IAAInB,EAAE,SAAS3B,GAAGA,EAAE+C,UAAU7C,EAAEgC,YAAYjC,EAAEY,MAAMb,EAAEiD,gBAAgBjD,EAAE+C,UAAU9C,EAAEc,QAAQ6B,KAAK5C,GAAGC,EAAEgB,SAAQ,EAAGY,MAAMD,EAAEV,EAAE,cAAcS,GAAGE,EAAEW,EAAExC,EAAEC,EAAE2B,GAAGA,EAAEK,GAAG,WAAWL,EAAEiB,cAActB,IAAII,GAAGC,EAAEa,gBAAe,GAAIS,OAAOC,aAAaD,OAAOC,YAAYC,mBAAmBF,OAAOC,YAAYC,mBAAmB,SAASpD,EAAES,GAAGA,EAAEyB,UAAUhC,EAAEgC,YAAYjC,EAAEY,MAAMb,EAAEC,EAAEgB,SAAQ,EAAGhB,EAAEc,QAAQ,CAAC,CAACsC,UAAU,cAAczC,KAAKH,EAAEgB,KAAK6B,OAAO7C,EAAE6C,OAAOC,WAAW9C,EAAE8C,WAAWR,UAAUtC,EAAEyB,UAAUe,gBAAgBxC,EAAEyB,UAAUlC,IAAI6B,SAAS2B,EAAE,WAAW,OAAOvD,IAAIA,EAAE,IAAIwD,SAAS,SAASzD,GAAG,MAAM,CAAC,SAAS,UAAU,eAAeuB,KAAK,SAAStB,GAAG+B,iBAAiB/B,EAAED,EAAE,CAACuC,MAAK,EAAGmB,SAAQ,EAAGpB,SAAQ,WAAYrC,GAAG0D,EAAE,SAAS3D,GAAG,IAAIC,EAAEC,EAAEQ,UAAUC,OAAO,QAAG,IAASD,UAAU,IAAIA,UAAU,GAAGiB,EAAElB,EAAE,OAAOmB,EAAEkB,IAAIjB,EAAE,SAAS7B,GAAG,IAAIE,EAAEF,EAAE+
C,UAAU7C,EAAE0B,EAAEM,WAAWP,EAAEd,MAAMX,EAAEyB,EAAEZ,QAAQ6B,KAAK5C,IAAI2B,EAAEV,SAAQ,EAAGhB,KAAK8B,EAAEb,EAAE,2BAA2BW,GAAG,GAAGE,EAAE,CAAC9B,EAAEuC,EAAExC,EAAE2B,EAAEI,EAAE7B,GAAG,IAAIwC,EAAE,WAAWf,EAAEV,UAAUc,EAAEc,cAActB,IAAIM,GAAGF,EAAEV,SAAQ,EAAGhB,MAAMuD,IAAII,KAAKlB,GAAGT,EAAES,GAAE,KAAMmB,EAAE,SAAS7D,GAAG,IAAIC,EAAEC,EAAEO,EAAE,QAAQR,EAAE,WAAW,IAAI,IAAIA,EAAE6D,YAAYC,iBAAiB,cAAc,IAAI,WAAW,IAAI/D,EAAE8D,YAAYE,OAAO/D,EAAE,CAACoD,UAAU,aAAaN,UAAU,GAAG,IAAI,IAAI7C,KAAKF,EAAE,oBAAoBE,GAAG,WAAWA,IAAID,EAAEC,GAAGI,KAAK2D,IAAIjE,EAAEE,GAAGF,EAAEkE,gBAAgB,IAAI,OAAOjE,EAAhL,GAAqLC,EAAEW,MAAMX,EAAEY,MAAMb,EAAEkE,cAAcjE,EAAEa,QAAQ,CAACd,GAAGC,EAAEe,SAAQ,EAAGjB,EAAEE,GAAG,MAAMF,MAAM,aAAamC,SAASiC,WAAWC,WAAWpE,EAAE,GAAG+B,iBAAiB,WAAW/B","file":"static/js/3.523cfdab.chunk.js","sourcesContent":["var t,n,e=function(){return\"\".concat(Date.now(),\"-\").concat(Math.floor(8999999999999*Math.random())+1e12)},i=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:e(),isFinal:!1}},a=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},r=!1,o=!1,s=function(t){r=!t.persisted},u=function(){addEventListener(\"pagehide\",s),addEventListener(\"beforeunload\",(function(){}))},c=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];o||(u(),o=!0),addEventListener(\"visibilitychange\",(function(n){var e=n.timeStamp;\"hidden\"===document.visibilityState&&t({timeStamp:e,isUnloading:r})}),{capture:!0,once:n})},l=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||\"hidden\"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},p=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],r=i(\"CLS\",0),o=function(t){t.hadRecentInput||(r.value+=t.value,r.entries.push(t),n())},s=a(\"layout-shift\",o);s&&(n=l(t,r,s,e),c((function(t){var e=t.isUnloading;s.takeRecords().map(o),e&&(r.isFinal=!0),n()})))},d=function(){return void 0===t&&(t=\"hidden\"===document.visibilityState?0:1/0,c((function(n){var e=n.timeStamp;return t=e}),!0)),{get timeStamp(){return t}}},v=function(t){var n,e=i(\"FCP\"),r=d(),o=a(\"paint\",(function(t){\"first-contentful-paint\"===t.name&&t.startTime<r.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));o&&(n=l(t,e,o))},f=function(t){var n=i(\"FID\"),e=d(),r=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,s())},o=a(\"first-input\",r),s=l(t,n,o);o?c((function(){o.takeRecords().map(r),o.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:\"first-input\",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],s())}))},m=function(){return n||(n=new Promise((function(t){return[\"scroll\",\"keydown\",\"pointerdown\"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),n},g=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],r=i(\"LCP\"),o=d(),s=function(t){var e=t.startTime;e<o.timeStamp?(r.value=e,r.entries.push(t)):r.isFinal=!0,n()},u=a(\"largest-contentful-paint\",s);if(u){n=l(t,r,u,e);var 
p=function(){r.isFinal||(u.takeRecords().map(s),r.isFinal=!0,n())};m().then(p),c(p,!0)}},h=function(t){var n,e=i(\"TTFB\");n=function(){try{var n=performance.getEntriesByType(\"navigation\")[0]||function(){var t=performance.timing,n={entryType:\"navigation\",startTime:0};for(var e in t)\"navigationStart\"!==e&&\"toJSON\"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},\"complete\"===document.readyState?setTimeout(n,0):addEventListener(\"pageshow\",n)};export{p as getCLS,v as getFCP,f as getFID,g as getLCP,h as getTTFB};\n"],"sourceRoot":""}
build/static/js/main.cbff00b9.chunk.js ADDED
@@ -0,0 +1,2 @@
+ (this.webpackJsonpdatasetcard=this.webpackJsonpdatasetcard||[]).push([[0],{139:function(e,t,a){"use strict";a.r(t);var n=a(0),s=a(1),i=a.n(s),o=a(41),r=a.n(o),c=(a(48),a(18)),d=a(26),l=a(3),h=a.n(l),u=a(5),p=a(6);function m(e){var t=e.value,a=e.title,s=e.id,i=e.rows,o=e.handleClick,r=e.handleChange;return Object(n.jsxs)("div",{className:"",children:[Object(n.jsx)("div",{className:"text-base font-normal max-w-40 text-gray-600",children:a}),Object(n.jsx)("div",{className:"mt-2 mr-4",children:Object(n.jsx)("textarea",{value:t,onClick:function(e){return o(e)},onChange:function(e){return r(e)},id:s,name:s,rows:i,className:"font-sans p-2 shadow-sm border border-solid border-gray-300 block w-full text-gray-600 sm:text-sm rounded-md"})})]})}var f=a(28);function g(e){var t=e.title,a=e.section,s=e.handleSection;return Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)("div",{className:"mt-1",children:Object(n.jsxs)("div",{onClick:function(){return s()},className:"cursor-pointer flex justify-between inline-block pt-6 borders font-medium text-gray-700",children:[Object(n.jsxs)("div",{className:"",children:[" ",t," "]}),a?Object(n.jsx)(f.b,{className:"ml-2 "}):Object(n.jsx)(f.a,{className:"ml-2"})]})}),Object(n.jsx)("style",{children:"\n .borders {\n border-bottom: solid 1px;\n border-color: #e2e8f0;\n }\n "})]})}var b={name:"Instructions",instructions:{yamlTags:{paragraph:["Add YAML tags"],example:["---","annotations_creators:","- no-annotation","language_creators:","- found","languages:","- en","licenses:","- unknown","multilinguality:","- monolingual","size_categories:","- 100K<n<1M","source_datasets:","- original","task_categories:","- question-answering","task_ids:","- abstractive-qa","- open-domain-qa","---"]},homepage:{paragraph:["Add homepage URL here if available (unless it's a GitHub repository)"],example:["[ELI5 homepage](https://facebookresearch.github.io/ELI5/explore.html)"]},repository:{paragraph:["If the dataset is hosted on github or has a github homepage, add URL here"],example:["[ELI5 repository](https://github.com/facebookresearch/ELI5)"]},paper:{paragraph:["If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)"],example:["[ELI5: Long Form Question Answering](https://arxiv.org/abs/1907.09190)"]},leaderboard:{paragraph:["If the dataset supports an active leaderboard, add link here"],example:["[N/A]"]},contact:{paragraph:["If known, name and email of at least one person the reader can contact for questions about the dataset."],example:["[Yacine Jernite](mailto:yacine@huggingface.co)"]},datasetSummary:{paragraph:["Briefly summarize the dataset, its intended use and the supported tasks. Give an overview of how and why the dataset was created. The summary should explicitly mention the languages present in the dataset (possibly in broad terms, e.g. translations between several pairs of European languages), and describe the domain, topic, or genre covered."],example:["The ELI5 dataset is an English-language dataset of questions and answers gathered from three subreddits were users ask factual questions requiring paragraph-length or longer answers. 
The dataset was created to support the task of open-domain long form abstractive question answering, and covers questions about general topics in its [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) subset, science in it [r/askscience](https://www.reddit.com/r/askscience/) subset, and History in its [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subset."]},supportedTasks:{paragraph:["For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (repace the `task-category-tag` with an appropriate `other:other-task-name`).","- `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION]. Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score. *[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name)."],example:["- `abstractive-qa`, `open-domain-qa`: The dataset can be used to train a model for Open Domain Long Form Question Answering. An LFQA model is presented with a non-factoid and asked to retrieve relevant information from a knowledge source (such as [Wikipedia](https://www.wikipedia.org/)), then use it to generate a multi-sentence answer. The model performance is measured by how high its [ROUGE](https://huggingface.co/metrics/rouge) score to the reference is. A [BART-based model](https://huggingface.co/yjernite/bart_eli5) with a [dense retriever](https://huggingface.co/yjernite/retribert-base-uncased) trained to draw information from [Wikipedia passages](https://huggingface.co/datasets/wiki_snippets) achieves a [ROUGE-L of 0.149](https://yjernite.github.io/lfqa.html#generation)."]},languages:{paragraph:["Provide a brief overview of the languages represented in the dataset. Describe relevant details about specifics of the language such as whether it is social media text, African American English,...","When relevant, please provide [BCP-47 codes](https://tools.ietf.org/html/bcp47), which consist of a [primary language subtag](https://tools.ietf.org/html/bcp47#section-2.2.1), with a [script subtag](https://tools.ietf.org/html/bcp47#section-2.2.3) and/or [region subtag](https://tools.ietf.org/html/bcp47#section-2.2.4) if available."],example:["The text in the dataset is in English, as spoken by Reddit users on the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits. The associated BCP-47 code is `en`."]},dataInstances:{paragraph:["Provide an JSON-formatted example and brief description of a typical instance in the dataset. If available, provide a link to further examples.","\n {\n 'example_field': ...,\n ...\n }\n ","Provide any additional information that is not covered in the other sections about the data here. 
In particular describe any relationships between data points and if these relationships are made explicit."],example:["A typical data point comprises a question, with a `title` containing the main question and a `selftext` which sometimes elaborates on it, and a list of answers from the forum sorted by the number of upvotes they obtained. Additionally, the URLs in each of the text fields have been extracted to respective lists and replaced by generic tokens in the text.","An example from the ELI5 test set looks as follows:","{'q_id': '8houtx'","'title': 'Why does water heated to room temperature feel colder than the air around it?'","'selftext': ''","'document': ''","'subreddit': 'explainlikeimfive'","'answers': {'a_id': ['dylcnfk', 'dylcj49']","'text': [\"Water transfers heat more efficiently than air. When something feels cold it's because heat is being transferred from your skin to whatever you're touching. Since water absorbs the heat more readily than air, it feels colder.\",\n \"Air isn't as good at transferring heat compared to something like water or steel (sit on a room temperature steel bench vs. a room temperature wooden bench, and the steel one will feel more cold).\n\nWhen you feel cold, what you're feeling is heat being transferred out of you. If there is no breeze, you feel a certain way. If there's a breeze, you will get colder faster (because the moving air is pulling the heat away from you), and if you get into water, its quite good at pulling heat from you. Get out of the water and have a breeze blow on you while you're wet, all of the water starts evaporating, pulling even more heat from you.\"]","'score': [5, 2]}","'title_urls': {'url': []}","'selftext_urls': {'url': []}","'answers_urls': {'url': []}}"]},dataFields:{paragraph:["List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. If the datasets contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.","- `example_field`: description of `example_field`"],example:["- q_id: a string question identifier for each example, corresponding to its ID in the [Pushshift.io](https://files.pushshift.io/reddit/submissions/) Reddit submission dumps.","- subreddit: One of explainlikeimfive, askscience, or AskHistorians, indicating which subreddit the question came from","- title: title of the question, with URLs extracted and replaced by URL_n tokens","- title_urls: list of the extracted URLs, the nth element of the list was replaced by URL_n","- selftext: either an empty string or an elaboration of the question","- selftext_urls: similar to title_urls but for self_text","- answers: a list of answers, each answer has:","- a_id: a string answer identifier for each answer, corresponding to its ID in the [Pushshift.io](https://files.pushshift.io/reddit/comments/) Reddit comments dumps.","- text: the answer text with the URLs normalized","- score: the number of upvotes the answer had received when the dumps were created","- answers_urls: a list of the extracted URLs. 
All answers use the same list, the numbering of the normalization token continues across answer texts"]},dataSplits:{paragraph:["Describe and name the splits in the dataset if there are more than one.","Describe any criteria for splitting the data, if used. If their are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.","Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:","\tTain\tValid\tTest\n Input Sentences \t\n Average Sentence Length"],example:["The data is split into a training, validation and test set for each of the three subreddits. In order to avoid having duplicate questions in across sets, the `title` field of each of the questions were ranked by their tf-idf match to their nearest neighbor and the ones with the smallest value were used in the test and validation sets. The final split sizes are as follow:","\tTain\tValid\tTest\n r/explainlikeimfive examples\t272634\t9812\t24512\n r/askscience examples\t131778\t2281\t4462\n r/AskHistorians examples\t98525\t4901\t9764"]},curationRationale:{paragraph:["What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together?"],example:["ELI5 was built to provide a testbed for machines to learn how to answer more complex questions, which requires them to find and combine information in a coherent manner. The dataset was built by gathering questions that were asked by community members of three subreddits, including [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), along with the answers that were provided by other users. The [rules of the subreddit](https://www.reddit.com/r/explainlikeimfive/wiki/detailed_rules) make this data particularly well suited to training a model for abstractive question answering: the questions need to seek an objective explanation about well established facts, and the answers provided need to be understandable to a layperson without any particular knowledge domain."]},dataCollection:{paragraph:["Describe the data collection process. Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.","If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).","If the data was modified or normalized after being collected (e.g. if the data is word-tokenized), describe the process and the tools used."],example:["The data was obtained by filtering submissions and comments from the subreddits of interest from the XML dumps of the [Reddit forum](https://www.reddit.com/) hosted on [Pushshift.io](https://files.pushshift.io/reddit/).","In order to further improve the quality of the selected examples, only questions with a score of at least 2 and at least one answer with a score of at least 2 were selected for the dataset. The dataset questions and answers span a period form August 2012 to August 2019."]},sourceLanguage:{paragraph:["State whether the data was produced by humans or machine generated. 
Describe the people or systems who originally created the data.","If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender.","Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.","Describe other people represented or mentioned in the data. Where possible, link to references for the information."],example:["The language producers are users of the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits between 2012 and 2019. No further demographic information was available from the data source."]},annotations:{paragraph:["If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs."],example:["The dataset does not contain any additional annotations."]},annotationProcess:{paragraph:["If applicable, describe the annotation process and any tools used, or state otherwise. Describe the amount of data annotated, if not all. Describe or reference annotation guidelines provided to the annotators. If available, provide interannotator statistics. Describe any annotation validation processes."],example:["[N/A]"]},annotators:{paragraph:["If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated.","Describe the people or systems who originally created the annotations and their selection criteria if applicable.","If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender.","Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here."],example:["[N/A]"]},personalInformation:{paragraph:["State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender. 
State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).","State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).","If efforts were made to anonymize the data, describe the anonymization process."],example:["The authors removed the speaker IDs from the [Pushshift.io](https://files.pushshift.io/reddit/) dumps but did not otherwise anonymize the data. Some of the questions and answers are about contemporary public figures or individuals who appeared in the news."]},socialImpact:{paragraph:["The purpose of this dataset is to help develop better question answering systems.","The statement should include both positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to people who are affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations.","Please also mention in this section if the proposed dataset contains a *low-resource* or under-represented language."],example:["The purpose of this dataset is to help develop better question answering systems.","A system that succeeds at the supported task would be able to provide a coherent answer to even complex questions requiring a multi-step explanation, which is beyond the ability of even the larger existing models. The task is also thought as a test-bed for retrieval model which can show the users which source text was used in generating the answer and allow them to confirm the information provided to them.","It should be noted however that the provided answers were written by Reddit users, an information which may be lost if models trained on it are deployed in down-stream applications and presented to users without context. The specific biases this may introduce are discussed in the next section."]},biasesDiscussion:{paragraph:["Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact.","For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic.","If analyses have been run quantifying these biases, please add brief summaries and links to the studies here."],example:["While Reddit hosts a number of thriving communities with high quality discussions, it is also widely known to have corners where sexism, hate, and harassment are significant issues. 
See for example the [recent post from Reddit founder u/spez](https://www.reddit.com/r/announcements/comments/gxas21/upcoming_changes_to_our_content_policy_our_board/) outlining some of the ways he thinks the website's historical policies have been responsible for this problem, [Adrienne Massanari's 2015 article on GamerGate](https://www.researchgate.net/publication/283848479_Gamergate_and_The_Fappening_How_Reddit's_algorithm_governance_and_culture_support_toxic_technocultures) and follow-up works, or a [2019 Wired article on misogyny on Reddit](https://www.wired.com/story/misogyny-reddit-research/).","While there has been some recent work in the NLP community on *de-biasing* models (e.g. [Black is to Criminal as Caucasian is to Police: Detecting and Removing Multiclass Bias in Word Embeddings](https://arxiv.org/abs/1904.04047) for word embeddings trained specifically on Reddit data), this problem is far from solved, and the likelihood that a trained model might learn the biases present in the data remains a significant concern.",'We still note some encouraging signs for all of these communities: [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) and [r/askscience](https://www.reddit.com/r/askscience/) have similar structures and purposes, and [r/askscience](https://www.reddit.com/r/askscience/) was found in 2015 to show medium supportiveness and very low toxicity when compared to other subreddits (see a [hackerfall post](https://hackerfall.com/story/study-and-interactive-visualization-of-toxicity-in), [thecut.com write-up](https://www.thecut.com/2015/03/interactive-chart-of-reddits-toxicity.html) and supporting [data](https://chart-studio.plotly.com/~bsbell21/210/toxicity-vs-supportiveness-by-subreddit/#data)). Meanwhile, the [r/AskHistorians rules](https://www.reddit.com/r/AskHistorians/wiki/rules) mention that the admins will not tolerate "_racism, sexism, or any other forms of bigotry_". However, further analysis of whether and to what extent these rules reduce toxicity is still needed.',"We also note that given the audience of the Reddit website which is more broadly used in the US and Europe, the answers will likely present a Western perspectives, which is particularly important to note when dealing with historical topics."]},limitations:{paragraph:["If studies of the datasets have outlined other limitations of the dataset, such as annotation artifacts, please outline and cite them here."],example:["The answers provided in the dataset are represent the opinion of Reddit users. While these communities strive to be helpful, they should not be considered to represent a ground truth."]},datasetCurators:{paragraph:["List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here."],example:["The dataset was initially created by Angela Fan, Ethan Perez, Yacine Jernite, Jason Weston, Michael Auli, and David Grangier, during work done at Facebook AI Research (FAIR)."]},licensingInformation:{paragraph:["Provide the license and link to the license webpage if available."],example:["The licensing status of the dataset hinges on the legal status of the [Pushshift.io](https://files.pushshift.io/reddit/) data which is unclear."]},citationInformation:{paragraph:["Provide the [BibTex](http://www.bibtex.org/)-formatted reference for the dataset. 
For example:","\n @article{article_id,\n author = {Author List},\n title = {Dataset Paper Title},\n journal = {Publication Venue},\n year = {2525}\n }\n ","If the dataset has a [DOI](https://www.doi.org/), please provide it here."],example:["@inproceedings{eli5_lfqa","author = {Angela Fan and","Yacine Jernite and","Ethan Perez and","David Grangier and","Jason Weston and","Michael Auli},","editor = {Anna Korhonen and","David R. Traum and","Llu{'{i}}s M{`{a}}rquez},","title = {{ELI5:} Long Form Question Answering},","booktitle = {Proceedings of the 57th Conference of the Association for Computational","Linguistics, {ACL} 2019, Florence, Italy, July 28- August 2, 2019,","Volume 1: Long Papers},","pages = {3558--3567},","publisher = {Association for Computational Linguistics},","year = {2019},","url = {https://doi.org/10.18653/v1/p19-1346},","doi = {10.18653/v1/p19-1346}","}"]}}},w=a(27),x=a.n(w),y=a(42);var v=function(){var e=Object(s.useState)(),t=Object(p.a)(e,2),a=t[0],i=t[1],o=Object(s.useState)({}),r=Object(p.a)(o,2),l=r[0],f=r[1],w=Object(s.useState)(!1),v=Object(p.a)(w,2),j=v[0],k=v[1],O=Object(s.useState)(!1),I=Object(p.a)(O,2),C=I[0],N=I[1],D=Object(s.useState)(!1),A=Object(p.a)(D,2),S=A[0],L=A[1],_=Object(s.useState)(!1),T=Object(p.a)(_,2),R=T[0],q=T[1],M=Object(s.useState)(!1),P=Object(p.a)(M,2),F=P[0],E=P[1],W=Object(s.useState)(!1),z=Object(p.a)(W,2),H=z[0],U=z[1],B=Object(s.useState)(!1),G=Object(p.a)(B,2),J=G[0],K=G[1];function Y(e){return V.apply(this,arguments)}function V(){return(V=Object(u.a)(h.a.mark((function e(t){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:i(t.target.id);case 1:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function Q(e){return X.apply(this,arguments)}function X(){return(X=Object(u.a)(h.a.mark((function e(t){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:f(Object(d.a)(Object(d.a)({},l),{},Object(c.a)({},t.target.id,t.currentTarget.value)));case 1:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function Z(){return(Z=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:K(!1),U(!1),E(!1),q(!1),L(!1),N(!1),k(!j);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function $(){return($=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:K(!1),U(!1),E(!1),q(!1),L(!1),k(!1),N(!C);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function ee(){return(ee=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:K(!1),U(!1),E(!1),q(!1),k(!1),N(!1),L(!S);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function te(){return(te=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:K(!1),U(!1),E(!1),L(!1),k(!1),N(!1),q(!R);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function ae(){return(ae=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:K(!1),U(!1),L(!1),k(!1),N(!1),q(!1),E(!F);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function ne(){return(ne=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:K(!1),L(!1),k(!1),N(!1),q(!1),E(!1),U(!H);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function se(){return(se=Object(u.a)(h.a.mark((function e(){return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 
0:L(!1),k(!1),N(!1),q(!1),E(!1),U(!1),K(!J);case 7:case"end":return e.stop()}}),e)})))).apply(this,arguments)}function ie(){return(ie=Object(u.a)(h.a.mark((function e(t){var a;return h.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return a="".concat(t.yamlTags?t.yamlTags:"[Needs More Information]","\n\n# Dataset Card for ").concat(t.datasetName?t.datasetName:"[Needs More Information]","\n\n## Table of Contents\n- [Dataset Description](#dataset-description)\n - [Dataset Summary](#dataset-summary)\n - [Supported Tasks](#supported-tasks-and-leaderboards)\n - [Languages](#languages)\n- [Dataset Structure](#dataset-structure)\n - [Data Instances](#data-instances)\n - [Data Fields](#data-instances)\n - [Data Splits](#data-instances)\n- [Dataset Creation](#dataset-creation)\n - [Curation Rationale](#curation-rationale)\n - [Source Data](#source-data)\n - [Annotations](#annotations)\n - [Personal and Sensitive Information](#personal-and-sensitive-information)\n- [Considerations for Using the Data](#considerations-for-using-the-data)\n - [Social Impact of Dataset](#social-impact-of-dataset)\n - [Discussion of Biases](#discussion-of-biases)\n - [Other Known Limitations](#other-known-limitations)\n- [Additional Information](#additional-information)\n - [Dataset Curators](#dataset-curators)\n - [Licensing Information](#licensing-information)\n - [Citation Information](#citation-information)\n\n## Dataset Description\n\n- **Homepage:** ").concat(t.homepage?t.homepage:"[Needs More Information]","\n- **Repository:** ").concat(t.repository?t.repository:"[Needs More Information]","\n- **Paper:** ").concat(t.paper?t.paper:"[Needs More Information]","\n- **Leaderboard:** ").concat(t.leaderboard?t.leaderboard:"[Needs More Information]","\n- **Point of Contact:** ").concat(t.contact?t.contact:"[Needs More Information]","\n\n### Dataset Summary\n\n").concat(t.datasetSummary?t.datasetSummary:"[Needs More Information]","\n\n### Supported Tasks and Leaderboards\n\n").concat(t.supportedTasks?t.supportedTasks:"[Needs More Information]","\n\n### Languages\n\n").concat(t.languages?t.languages:"[Needs More Information]","\n\n## Dataset Structure\n\n### Data Instances\n\n").concat(t.dataInstances?t.dataInstances:"[Needs More Information]","\n\n### Data Fields\n\n").concat(t.dataFields?t.dataFields:"[Needs More Information]","\n\n### Data Splits\n\n").concat(t.dataSplits?t.dataSplits:"[Needs More Information]","\n\n## Dataset Creation\n\n### Curation Rationale\n\n").concat(t.curationRationale?t.curationRationale:"[Needs More Information]","\n\n### Source Data\n\n#### Initial Data Collection and Normalization\n\n").concat(t.dataCollection?t.dataCollection:"[Needs More Information]","\n\n#### Who are the source language producers?\n\n").concat(t.sourceLanguage?t.sourceLanguage:"[Needs More Information]","\n\n### Annotations\n\n#### Annotation process\n\n").concat(t.annotationProcess?t.annotationProcess:"[Needs More Information]","\n\n#### Who are the annotators?\n\n").concat(t.annotators?t.annotators:"[Needs More Information]","\n\n### Personal and Sensitive Information\n\n").concat(t.personalInformation?t.personalInformation:"[Needs More Information]","\n\n## Considerations for Using the Data\n\n### Social Impact of Dataset\n\n").concat(t.socialImpact?t.socialImpact:"[Needs More Information]","\n\n### Discussion of Biases\n\n").concat(t.biasesDiscussion?t.biasesDiscussion:"[Needs More Information]","\n\n### Other Known Limitations\n\n").concat(t.limitations?t.limitations:"[Needs More Information]","\n\n## 
Additional Information\n\n### Dataset Curators\n\n").concat(t.datasetCurators?t.datasetCurators:"[Needs More Information]","\n\n### Licensing Information\n\n").concat(t.licensingInformation?t.licensingInformation:"[Needs More Information]","\n\n### Citation Information\n\n").concat(t.citationInformation?t.citationInformation:"[Needs More Information]"),e.next=3,Object(y.save)(a,"README.md");case 3:case"end":return e.stop()}}),e)})))).apply(this,arguments)}return Object(n.jsxs)("div",{className:"overflow-y-auto min-h-full font-sans",children:[Object(n.jsxs)("div",{className:"grid grid-cols-12 gap-6",children:[Object(n.jsx)("div",{className:"col-span-8",children:Object(n.jsx)("div",{className:"max-h-screen flex justify-center overflow-hidden bg-white",children:Object(n.jsxs)("div",{className:"xxs:max-w-xxs xs:max-w-xs md:max-w-2xl xl:max-w-4xl xxl:min-w-5xl py-8 px-4 sm:py-12 sm:px-6 lg:px-8",children:[Object(n.jsxs)("div",{className:"text-center",children:[Object(n.jsxs)("p",{className:"mt-1 text-4xl font-extrabold text-gray-700 sm:tracking-tight",children:["New Dataset Card for ",Object(n.jsx)("input",{onChange:function(e){return Q(e)},id:"datasetName",placeholder:"dataset name",maxLength:"200",className:"ml-4 py-4 text-4xl text-gray-600 w-80 border border-solid border-gray-200 border-none h-10 rounded-md shadow"})]}),Object(n.jsx)("p",{className:"max-w-xl mt-2 mx-auto text-lg text-gray-500",children:"Fill in the form below"})]}),Object(n.jsx)("div",{className:"flex justify-end",children:Object(n.jsx)("button",{onClick:function(){return function(e){return ie.apply(this,arguments)}(l)},type:"button",className:"cursor-pointer inline-flex items-center px-3 py-2 border border-solid border-gray-300 shadow-sm text-base leading-4 font-medium rounded-md text-gray-700 bg-white hover:bg-gray-50 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-gray-500",children:"Export"})}),Object(n.jsx)("div",{className:"shadow w-full xxs:max-h-xxs md:max-h-xs mb-32 lg:max-h-md xl:max-h-xl xxl:max-h-screen overflow-y-auto rounded-lg mt-4",children:Object(n.jsx)("div",{className:"max-w-7xl px-4 divide-y-2 divide-gray-200 sm:px-6 lg:px-8",children:Object(n.jsx)("div",{className:"",children:Object(n.jsxs)("dl",{className:"space-y-8 divide-y p-6 divide-gray-200",children:[Object(n.jsx)(g,{title:"YAML Tags",section:j,handleSection:function(){return Z.apply(this,arguments)}}),j&&Object(n.jsx)(m,{value:l.yamlTags,title:"YAML tags",id:"yamlTags",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(g,{title:"Urls",section:C,handleSection:function(){return $.apply(this,arguments)}}),C&&Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)(m,{value:l.homepage,title:"Homepage",id:"homepage",rows:2,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.repository,title:"Repository",id:"repository",rows:2,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.paper,title:"Paper",id:"paper",rows:2,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.leaderboard,title:"Leaderboard",id:"leaderboard",rows:2,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.contact,title:"Point of Contact",id:"contact",rows:2,handleClick:Y,handleChange:Q})]}),Object(n.jsx)(g,{title:"Dataset Description",section:S,handleSection:function(){return ee.apply(this,arguments)}}),S&&Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)(m,{value:l.datasetSummary,title:"Dataset Summary",id:"datasetSummary",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.supportedTasks,title:"Supported Tasks and 
Leaderboards",id:"supportedTasks",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.languages,title:"Languages",id:"languages",rows:6,handleClick:Y,handleChange:Q})]}),Object(n.jsx)(g,{title:"Dataset Structure",section:R,handleSection:function(){return te.apply(this,arguments)}}),R&&Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)(m,{value:l.dataInstances,title:"Data Instances",id:"dataInstances",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.dataFields,title:"Data Fields",id:"dataFields",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.dataSplits,title:"Data Splits",id:"dataSplits",rows:6,handleClick:Y,handleChange:Q})]}),Object(n.jsx)(g,{title:"Dataset Creation",section:F,handleSection:function(){return ae.apply(this,arguments)}}),F&&Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)(m,{value:l.curationRationale,title:"Curation Rationale",id:"curationRationale",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.dataCollection,title:"Initial Data Collection and Normalization",id:"dataCollection",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.sourceLanguage,title:"Who are the source language producers?",id:"sourceLanguage",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.annotationProcess,title:"Annotation Process",id:"annotationProcess",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.annotators,title:"Who are the annotators?",id:"annotators",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.personalInformation,title:"Personal and Sensitive Information",id:"personalInformation",rows:6,handleClick:Y,handleChange:Q})]}),Object(n.jsx)(g,{title:"Considerations for Using the Data",section:H,handleSection:function(){return ne.apply(this,arguments)}}),H&&Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)(m,{value:l.socialImpact,title:"Social Impact of Dataset",id:"socialImpact",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.biasesDiscussion,title:"Discussion of Biases",id:"biasesDiscussion",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{value:l.limitations,title:"Other Known Limitations",id:"limitations",rows:6,handleClick:Y,handleChange:Q})]}),Object(n.jsx)(g,{title:"Additional Information",section:J,handleSection:function(){return se.apply(this,arguments)}}),J&&Object(n.jsxs)(n.Fragment,{children:[Object(n.jsx)(m,{title:"Dataset Curators",id:"datasetCurators",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{title:"Licensing Information",id:"licensingInformation",rows:6,handleClick:Y,handleChange:Q}),Object(n.jsx)(m,{title:"Citation Information",id:"citationInformation",rows:6,handleClick:Y,handleChange:Q})]})]})})})})]})})}),Object(n.jsx)("div",{className:"col-span-4",children:Object(n.jsx)("div",{className:"h-screen flex overflow-hidden bg-gray-100",children:Object(n.jsxs)("div",{className:"max-w-7xl mx-auto w-full py-8 px-4 sm:py-12",children:[Object(n.jsxs)("div",{className:"text-center",children:[Object(n.jsx)("p",{className:"mt-1 text-4xl font-extrabold text-gray-700 sm:tracking-tight",children:"Information"}),!a&&Object(n.jsx)("p",{className:"max-w-xl mt-5 mx-auto text-lg text-gray-500",children:"Click on a field to see instructions & example"})]}),a&&Object(n.jsxs)("div",{className:"max-w-7xl xxs:max-h-xxs md:max-h-xs mb-32 lg:max-h-md xl:max-h-xl xxl:max-h-screen overflow-y-auto text-left mx-auto py-12 px-4 divide-y-2 divide-gray-200 sm:px-6 lg:py-16 
lg:px-8",children:[Object(n.jsx)("div",{className:"mt-12",children:Object(n.jsxs)("dl",{className:"space-y-8 divide-gray-200 text-gray-600 text-left",children:[Object(n.jsx)("p",{className:"mt-1 text-xl font-extrabold text-gray-700 sm:tracking-tight",children:"Instructions"}),b.instructions[a]&&b.instructions[a].paragraph.map((function(e){return Object(n.jsx)("div",{children:Object(n.jsx)(x.a,{source:e,renderers:{link:function(e){return Object(n.jsx)("a",{href:e.href,target:"_blank",children:e.children})}}})},e)}))]})}),Object(n.jsx)("div",{className:"mt-12",children:Object(n.jsxs)("dl",{className:"space-y-8 divide-gray-200 text-gray-600 text-left",children:[Object(n.jsx)("p",{className:"mt-1 text-xl font-extrabold text-gray-700 sm:tracking-tight",children:Object(n.jsx)(x.a,{renderers:{link:function(e){return Object(n.jsx)("a",{href:e.href,target:"_blank",children:e.children})}},source:"Example from the [ELI5 dataset card](https://github.com/huggingface/datasets/blob/master/datasets/eli5/README.md)"})}),Object(n.jsx)("div",{className:"",children:b.instructions[a]&&b.instructions[a].example.map((function(e,t){return Object(n.jsx)("div",{className:"mt-2",children:e},e+t)}))})]})})]}),Object(n.jsxs)("div",{className:"absolute bottom-0 text-xs left-0 ml-4 text-gray-500",children:["developed by",Object(n.jsx)("a",{className:"ml-1 no-underline text-gray-500",href:"https://huggingface.co/evrardts",target:"_blank",children:"Evrard t'Serstevens"})]})]})})})]}),Object(n.jsx)("style",{children:"\n .borders {\n border-bottom: solid 1px;\n border-color: #e2e8f0;\n }\n "})]})},j=function(e){e&&e instanceof Function&&a.e(3).then(a.bind(null,140)).then((function(t){var a=t.getCLS,n=t.getFID,s=t.getFCP,i=t.getLCP,o=t.getTTFB;a(e),n(e),s(e),i(e),o(e)}))};r.a.render(Object(n.jsx)(i.a.StrictMode,{children:Object(n.jsx)(v,{})}),document.getElementById("root")),j()},48:function(e,t,a){}},[[139,1,2]]]);
+ //# sourceMappingURL=main.cbff00b9.chunk.js.map
build/static/js/main.cbff00b9.chunk.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["InputField.js","Section.js","Instructions.js","App.js","reportWebVitals.js","index.js"],"names":["InputField","value","title","id","rows","handleClick","handleChange","className","onClick","e","onChange","name","Section","section","handleSection","instructions","yamlTags","paragraph","example","homepage","repository","paper","leaderboard","contact","datasetSummary","supportedTasks","languages","dataInstances","dataFields","dataSplits","curationRationale","dataCollection","sourceLanguage","annotations","annotationProcess","annotators","personalInformation","socialImpact","biasesDiscussion","limitations","datasetCurators","licensingInformation","citationInformation","App","useState","fieldFocussed","setFieldFocussed","card","setCard","tagsSection","setTagsSection","urlsSection","setUrlsSection","datasetDescriptionSection","setDatasetDescriptionSection","datasetStructureSection","setDatasetStructureSection","datasetCreationSection","setDatasetCreationSection","considerationsSection","setConsiderationsSection","additionalInformationSection","setAdditionalInformationSection","a","target","currentTarget","textTest","datasetName","save","placeholder","maxLength","exportFile","type","Instructions","map","para","source","renderers","link","props","href","children","ex","index","reportWebVitals","onPerfEntry","Function","then","getCLS","getFID","getFCP","getLCP","getTTFB","ReactDOM","render","StrictMode","document","getElementById"],"mappings":"qNAEe,SAASA,EAAT,GAA4E,IAAtDC,EAAqD,EAArDA,MAAOC,EAA8C,EAA9CA,MAAOC,EAAuC,EAAvCA,GAAIC,EAAmC,EAAnCA,KAAMC,EAA6B,EAA7BA,YAAaC,EAAgB,EAAhBA,aAExE,OACE,sBAAKC,UAAU,GAAf,UACE,qBAAKA,UAAU,+CAAf,SACGL,IAEH,qBAAKK,UAAU,YAAf,SACE,0BAAUN,MAAOA,EAAOO,QAAS,SAACC,GAAD,OAAOJ,EAAYI,IAAIC,SAAU,SAACD,GAAD,OAAOH,EAAaG,IAAIN,GAAIA,EAAIQ,KAAMR,EAAIC,KAAMA,EAAMG,UAAU,sH,YCP3H,SAASK,EAAT,GAAqD,IAAlCV,EAAiC,EAAjCA,MAAOW,EAA0B,EAA1BA,QAASC,EAAiB,EAAjBA,cAEhD,OACE,qCACA,qBAAKP,UAAU,OAAf,SACA,sBAAKC,QAAS,kBAAMM,KAAiBP,UAAU,0FAA/C,UACA,sBAAKA,UAAU,GAAf,cAAoBL,EAApB,OACCW,EACD,cAAC,IAAD,CAAaN,UAAU,UAEvB,cAAC,IAAD,CAAeA,UAAU,cAIzB,sICjBJ,IAEe,GACXI,KAHS,eAITI,aAAc,CACRC,SAAU,CACRC,UAAW,CACT,iBAEFC,QAAS,CACP,MADO,6TAwBXC,SAAU,CACRF,UAAW,CACT,wEAEFC,QAAS,CACP,0EAGJE,WAAY,CACVH,UAAW,CACT,6EAEFC,QAAS,CACP,gEAGJG,MAAO,CACLJ,UAAW,CACT,uJAEFC,QAAS,CACP,2EAGJI,YAAa,CACXL,UAAW,CACT,gEAEFC,QAAS,CACP,UAGJK,QAAS,CACPN,UAAW,CACT,2GAEFC,QAAS,CACP,mDAGJM,eAAgB,CACdP,UAAW,CACT,4VAEFC,QAAS,CACP,6jBAGJO,eAAgB,CACdR,UAAW,CACT,sVACA,gtBAEFC,QAAS,CACP,uxBAGJQ,UAAW,CACTT,UAAW,CACT,wMACA,iVAEFC,QAAS,CACP,sTAGJS,cAAe,CACbV,UAAW,CACT,kJADS,iHAQT,gNAEFC,QAAS,CACP,uWACA,sDAFO,6tCAiBXU,WAAY,CACVX,UAAW,CACT,2fACA,qDAEFC,QAAS,CAAC,gLAAD,q6BAcXW,WAAY,CACVZ,UAAW,CACT,0EACA,sSACA,8IAHS,gGAQXC,QAAS,CACP,wXADO,6MAQXY,kBAAmB,CACjBb,UAAW,CACT,gJAEFC,QAAS,CACP,uxBAGJa,eAAgB,CACdd,UAAW,CACT,6MACA,iKACA,+IAEFC,QAAQ,CACN,8NACA,mRAGJc,eAAgB,CACdf,UAAW,CACT,sIACA,kUACA,uQACA,uHAEFC,QAAS,CACT,8UAGFe,YAAa,CACXhB,UAAW,CACT,qIAEFC,QAAS,CACP,6DAGJgB,kBAAmB,CACjBjB,UAAW,CACT,oTAEFC,QAAS,CACP,UAGJiB,WAAY,CACVlB,UAAW,CACT,4KACA,oHACA,wTACA,2QAEFC,QAAS,CACP,UAGJkB,oBAAqB,CACnBnB,UAAW,CACT,qgBACA,kXACA,mFAEFC,QAAS,CACP,qQAGJmB,aAAc,CACZpB,UAAW,CACT,oFACA,kaACA,wHAEFC,QAAS,CACP,oFACA,4ZACA,2SAGJoB,iBAAkB,CAChBrB,UAAW,CACT,sJACA,4PACA,iHAEFC,QAAS,CACP,4xBACA,ybAFO,i/BAIP,qPAGJqB,YAAa,CACXtB,UAAW,CACT,+IAEFC,QAAS,CACP,4LAGJsB,gBAAiB,CACfvB,UAAW,CACT,kIAEFC,QAAS,CACP,mLAGJuB,qBAAsB,CACpBxB,UAAW,CACT,qEAEFC,QAAS,CACP,oJAGJwB,oBAAqB,CACnBzB,UAAW,CACT,iGADS,8PAUT,6EAEFC,QAAS,CAAC,2BAAD,4oB,yBCqFNyB,MAjXf,WAAgB,IAAD,EAE6
BC,qBAF7B,mBAENC,EAFM,KAESC,EAFT,OAGWF,mBAAS,IAHpB,mBAGNG,EAHM,KAGAC,EAHA,OAIyBJ,oBAAS,GAJlC,mBAINK,EAJM,KAIOC,EAJP,OAKyBN,oBAAS,GALlC,mBAKNO,EALM,KAKOC,EALP,OAMqDR,oBAAS,GAN9D,mBAMNS,EANM,KAMqBC,EANrB,OAOiDV,oBAAS,GAP1D,mBAONW,EAPM,KAOmBC,EAPnB,OAQ+CZ,oBAAS,GARxD,mBAQNa,EARM,KAQkBC,EARlB,OAS6Cd,oBAAS,GATtD,mBASNe,EATM,KASiBC,EATjB,OAU2DhB,oBAAS,GAVpE,mBAUNiB,EAVM,KAUwBC,EAVxB,cAYEzD,EAZF,8EAYb,WAA2BI,GAA3B,SAAAsD,EAAA,sDACEjB,EAAiBrC,EAAEuD,OAAO7D,IAD5B,4CAZa,+BAgBEG,EAhBF,8EAgBb,WAA4BG,GAA5B,SAAAsD,EAAA,sDACEf,EAAQ,2BAAID,GAAL,kBAAYtC,EAAEuD,OAAO7D,GAAIM,EAAEwD,cAAchE,SADlD,4CAhBa,kEAoBb,sBAAA8D,EAAA,sDACED,GAAgC,GAChCF,GAAyB,GACzBF,GAA0B,GAC1BF,GAA2B,GAC3BF,GAA6B,GAC7BF,GAAe,GACfF,GAAgBD,GAPlB,4CApBa,kEA8Bb,sBAAAc,EAAA,sDACED,GAAgC,GAChCF,GAAyB,GACzBF,GAA0B,GAC1BF,GAA2B,GAC3BF,GAA6B,GAC7BJ,GAAe,GACfE,GAAgBD,GAPlB,4CA9Ba,oEAwCb,sBAAAY,EAAA,sDACED,GAAgC,GAChCF,GAAyB,GACzBF,GAA0B,GAC1BF,GAA2B,GAC3BN,GAAe,GACfE,GAAe,GACfE,GAA8BD,GAPhC,4CAxCa,oEAkDb,sBAAAU,EAAA,sDACED,GAAgC,GAChCF,GAAyB,GACzBF,GAA0B,GAC1BJ,GAA6B,GAC7BJ,GAAe,GACfE,GAAe,GACfI,GAA4BD,GAP9B,4CAlDa,oEA4Db,sBAAAQ,EAAA,sDACED,GAAgC,GAChCF,GAAyB,GACzBN,GAA6B,GAC7BJ,GAAe,GACfE,GAAe,GACfI,GAA2B,GAC3BE,GAA2BD,GAP7B,4CA5Da,oEAsEb,sBAAAM,EAAA,sDACED,GAAgC,GAChCR,GAA6B,GAC7BJ,GAAe,GACfE,GAAe,GACfI,GAA2B,GAC3BE,GAA0B,GAC1BE,GAA0BD,GAP5B,4CAtEa,oEAgFb,sBAAAI,EAAA,sDACET,GAA6B,GAC7BJ,GAAe,GACfE,GAAe,GACfI,GAA2B,GAC3BE,GAA0B,GAC1BE,GAAyB,GACzBE,GAAiCD,GAPnC,4CAhFa,oEA0Fb,WAA0Bd,GAA1B,eAAAgB,EAAA,6DACMG,EADN,UAECnB,EAAK/B,SAAW+B,EAAK/B,SAAW,2BAFjC,kCAImB+B,EAAKoB,YAAcpB,EAAKoB,YAAc,2BAJzD,6jCA+BgBpB,EAAK5B,SAAW4B,EAAK5B,SAAW,2BA/BhD,+BAgCkB4B,EAAK3B,WAAa2B,EAAK3B,WAAa,2BAhCtD,0BAiCa2B,EAAK1B,MAAQ0B,EAAK1B,MAAQ,2BAjCvC,gCAkCmB0B,EAAKzB,YAAcyB,EAAKzB,YAAc,2BAlCzD,qCAmCwByB,EAAKxB,QAAUwB,EAAKxB,QAAU,2BAnCtD,sCAuCAwB,EAAKvB,eAAiBuB,EAAKvB,eAAgB,2BAvC3C,uDA2CAuB,EAAKtB,eAAiBsB,EAAKtB,eAAiB,2BA3C5C,gCA+CAsB,EAAKrB,UAAYqB,EAAKrB,UAAY,2BA/ClC,6DAqDAqB,EAAKpB,cAAgBoB,EAAKpB,cAAgB,2BArD1C,kCAyDAoB,EAAKnB,WAAamB,EAAKnB,WAAa,2BAzDpC,kCA6DAmB,EAAKlB,WAAakB,EAAKlB,WAAa,2BA7DpC,gEAmEAkB,EAAKjB,kBAAoBiB,EAAKjB,kBAAoB,2BAnElD,oFAyEAiB,EAAKhB,eAAiBgB,EAAKhB,eAAiB,2BAzE5C,8DA6EAgB,EAAKf,eAAiBe,EAAKf,eAAiB,2BA7E5C,6DAmFAe,EAAKb,kBAAoBa,EAAKb,kBAAoB,2BAnFlD,+CAuFAa,EAAKZ,WAAaY,EAAKZ,WAAa,2BAvFpC,yDA2FAY,EAAKX,oBAAsBW,EAAKX,oBAAsB,2BA3FtD,uFAiGAW,EAAKV,aAAeU,EAAKV,aAAe,2BAjGxC,2CAqGAU,EAAKT,iBAAmBS,EAAKT,iBAAmB,2BArGhD,8CAyGAS,EAAKR,YAAcQ,EAAKR,YAAc,2BAzGtC,oEA+GAQ,EAAKP,gBAAkBO,EAAKP,gBAAkB,2BA/G9C,4CAmHAO,EAAKN,qBAAuBM,EAAKN,qBAAuB,2BAnHxD,2CAuHAM,EAAKL,oBAAsBK,EAAKL,oBAAsB,4BAvHtD,SAwHQ0B,eAAKF,EAAU,aAxHvB,4CA1Fa,sBAsNb,OACE,sBAAK3D,UAAU,uCAAf,UACE,sBAAKA,UAAU,0BAAf,UACE,qBAAKA,UAAU,aAAf,SACE,qBAAKA,UAAU,4DAAf,SACE,sBAAKA,UAAU,uGAAf,UACE,sBAAKA,UAAU,cAAf,UACE,oBAAGA,UAAU,+DAAb,kCAAiG,uBAAOG,SAAU,SAACD,GAAD,OAAOH,EAAaG,IAAKN,GAAG,cAAckE,YAAY,eAAeC,UAAU,MAAM/D,UAAU,oHACjN,mBAAGA,UAAU,8CAAb,uCAEF,qBAAKA,UAAU,mBAAf,SACE,wBAAQC,QAAS,kBAjOlB,4CAiOwB+D,CAAWxB,IAAOyB,KAAK,SAASjE,UAAU,6PAAjE,sBAIF,qBAAKA,UAAU,yHAAf,SACE,qBAAKA,UAAU,4DAAf,SACE,qBAAKA,UAAU,GAAf,SACE,qBAAIA,UAAU,yCAAd,UAEE,cAACK,EAAD,CAASV,MAAO,YAAaW,QAASoC,EAAanC,cA1O1D,6CA4OQmC,GACD,cAACjD,EAAD,CAAYC,MAAO8C,EAAK/B,SAAUd,MAAO,YAAaC,GAAI,WAAYC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAGvH,cAACM,EAAD,CAASV,MAAO,OAAQW,QAASsC,EAAarC,cAhPrD,6CAkPQqC,GACD,qCACA,cAACnD,EAAD,CAAYC,MAAO8C,EAAK5B,SAAUjB,MAAO,WAAYC,GAAI,WAAYC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IACtH,cAACN,EAAD,CAAYC,MAAO8C,EAAK3B,WAAYlB,MAAO,aAAcC,GAAI,aAAcC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAC5H,cAACN,EAAD,CAAYC,MAAO8C,EAAK1B,MAAOnB,MAAO,QAASC,GAAI,QAASC,KAAM,EAAGC,YAAaA
,EAAaC,aAAcA,IAC7G,cAACN,EAAD,CAAYC,MAAO8C,EAAKzB,YAAapB,MAAO,cAAeC,GAAI,cAAeC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAC/H,cAACN,EAAD,CAAYC,MAAO8C,EAAKxB,QAASrB,MAAO,mBAAoBC,GAAI,UAAWC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,OAI5H,cAACM,EAAD,CAASV,MAAO,sBAAuBW,QAASwC,EAA2BvC,cA5PlF,8CA8PQuC,GACD,qCACA,cAACrD,EAAD,CAAYC,MAAO8C,EAAKvB,eAAgBtB,MAAO,kBAAmBC,GAAI,iBAAkBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IACzI,cAACN,EAAD,CAAYC,MAAO8C,EAAKtB,eAAgBvB,MAAO,mCAAoCC,GAAI,iBAAkBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAC1J,cAACN,EAAD,CAAYC,MAAO8C,EAAKrB,UAAWxB,MAAO,YAAaC,GAAI,YAAaC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,OAIzH,cAACM,EAAD,CAASV,MAAO,oBAAqBW,QAAS0C,EAAyBzC,cAtQ9E,8CAwQQyC,GACD,qCACA,cAACvD,EAAD,CAAYC,MAAO8C,EAAKpB,cAAezB,MAAO,iBAAkBC,GAAI,gBAAiBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IACtI,cAACN,EAAD,CAAYC,MAAO8C,EAAKnB,WAAY1B,MAAO,cAAeC,GAAI,aAAcC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAC7H,cAACN,EAAD,CAAYC,MAAO8C,EAAKlB,WAAY3B,MAAO,cAAeC,GAAI,aAAcC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,OAI7H,cAACM,EAAD,CAASV,MAAO,mBAAoBW,QAAS4C,EAAwB3C,cAhR5E,8CAkRQ2C,GACD,qCACA,cAACzD,EAAD,CAAYC,MAAO8C,EAAKjB,kBAAmB5B,MAAO,qBAAsBC,GAAI,oBAAqBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAClJ,cAACN,EAAD,CAAYC,MAAO8C,EAAKhB,eAAgB7B,MAAO,4CAA6CC,GAAI,iBAAkBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IACnK,cAACN,EAAD,CAAYC,MAAO8C,EAAKf,eAAgB9B,MAAO,yCAA0CC,GAAI,iBAAkBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAChK,cAACN,EAAD,CAAYC,MAAO8C,EAAKb,kBAAmBhC,MAAO,qBAAsBC,GAAI,oBAAqBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAClJ,cAACN,EAAD,CAAYC,MAAO8C,EAAKZ,WAAYjC,MAAO,0BAA2BC,GAAI,aAAcC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IACzI,cAACN,EAAD,CAAYC,MAAO8C,EAAKX,oBAAqBlC,MAAO,qCAAsCC,GAAI,sBAAuBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,OAItK,cAACM,EAAD,CAASV,MAAO,oCAAqCW,QAAS8C,EAAuB7C,cA7R5F,8CA+RQ6C,GACD,qCACA,cAAC3D,EAAD,CAAYC,MAAO8C,EAAKV,aAAcnC,MAAO,2BAA4BC,GAAI,eAAgBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAC9I,cAACN,EAAD,CAAYC,MAAO8C,EAAKT,iBAAkBpC,MAAO,uBAAwBC,GAAI,mBAAoBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAClJ,cAACN,EAAD,CAAYC,MAAO8C,EAAKR,YAAarC,MAAO,0BAA2BC,GAAI,cAAeC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,OAI3I,cAACM,EAAD,CAASV,MAAO,yBAA0BW,QAASgD,EAA8B/C,cAvSxF,8CAySQ+C,GACD,qCACA,cAAC7D,EAAD,CAAYE,MAAO,mBAAoBC,GAAI,kBAAmBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IAC/G,cAACN,EAAD,CAAYE,MAAO,wBAAyBC,GAAI,uBAAwBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,IACzH,cAACN,EAAD,CAAYE,MAAO,uBAAwBC,GAAI,sBAAuBC,KAAM,EAAGC,YAAaA,EAAaC,aAAcA,uBAWrI,qBAAKC,UAAU,aAAf,SACA,qBAAKA,UAAU,4CAAf,SACE,sBAAKA,UAAU,8CAAf,UACE,sBAAKA,UAAU,cAAf,UACE,mBAAGA,UAAU,+DAAb,0BACEsC,GACF,mBAAGtC,UAAU,8CAAb,+DAGDsC,GACD,sBAAKtC,UAAU,sLAAf,UACI,qBAAKA,UAAU,QAAf,SACE,qBAAIA,UAAU,oDAAd,UACA,mBAAGA,UAAU,8DAAb,0BACCkE,EAAa1D,aAAa8B,IAAkB4B,EAAa1D,aAAa8B,GAAe5B,UAAUyD,KAAI,SAACC,GAAD,OAClG,8BACE,cAAC,IAAD,CAAeC,OAAQD,EACvBE,UAAW,CAACC,KAAM,SAAAC,GAAK,OAAI,mBAAGC,KAAMD,EAAMC,KAAMhB,OAAO,SAA5B,SAAsCe,EAAME,gBAF/DN,WAQd,qBAAKpE,UAAU,QAAf,SACE,qBAAIA,UAAU,oDAAd,UACA,mBAAGA,UAAU,8DAAb,SAA2E,cAAC,IAAD,CAAesE,UAAW,CAACC,KAAM,SAAAC,GAAK,OAAI,mBAAGC,KAAMD,EAAMC,KAAMhB,OAAO,SAA5B,SAAsCe,EAAME,aAAgBL,OAAQ,wHACzL,qBAAKrE,UAAU,GAAf,SACCkE,EAAa1D,aAAa8B,IAAkB4B,EAAa1D,aAAa8B,GAAe3B,QAAQwD,KAAI,SAACQ,EAAIC,GAAL,OAChG,qBAAK5E,UAAU,OAAf,SACG2E,GADwBA,EAAGC,gBAStC,sBAAK5E,UAAU,sDAAf,yBAEE,mBAAGA,UAAU,kCAAkCyE,KAAM,kCAAmChB,OAAO,SAA/F,8CAQR,iJCnWSoB,EAZS,SAAAC,GAClBA,GAAeA,aAAuBC,UACxC,8BAAqBC,MAAK,YAAkD,IAA/CC,EAA8C,EAA9CA,OAAQC,EAAsC,EAAtCA,OAAQC,EAA8B,EAA9BA,OAAQC,EAAsB,EAAtBA,OAAQC,EAAc,EAAdA,QAC3DJ,EAAOH,GACPI,EAAOJ,GACPK,EAAOL,GACPM,EAAON,GACPO,EAAQP,OCDdQ,IAASC,OACP,cAAC,IAAMC,WAAP,UACE,cAAC,EAAD,MAEFC,SAASC,eAAe,SAM1Bb,K","file":"static/js/main.cbff00b9.chunk.js","sourcesContent":["import React from 'react';\n\nexport default 
function InputField({ value, title, id, rows, handleClick, handleChange }) {\n\n return (\n <div className=\"\">\n <div className=\"text-base font-normal max-w-40 text-gray-600\">\n {title}\n </div>\n <div className=\"mt-2 mr-4\">\n <textarea value={value} onClick={(e) => handleClick(e)} onChange={(e) => handleChange(e)} id={id} name={id} rows={rows} className=\"font-sans p-2 shadow-sm border border-solid border-gray-300 block w-full text-gray-600 sm:text-sm rounded-md\"></textarea>\n </div>\n </div>\n );\n}","import React from 'react';\nimport {BsChevronDown, BsChevronUp} from 'react-icons/bs'\n\nexport default function Section({ title, section, handleSection }) {\n\n return (\n <>\n <div className=\"mt-1\">\n <div onClick={() => handleSection()} className=\"cursor-pointer flex justify-between inline-block pt-6 borders font-medium text-gray-700\">\n <div className=\"\"> {title} </div>\n {section ?\n <BsChevronUp className=\"ml-2 \" />\n :\n <BsChevronDown className=\"ml-2\" />\n }\n </div>\n </div>\n <style>{`\n .borders {\n border-bottom: solid 1px;\n border-color: #e2e8f0;\n }\n `}</style>\n </>\n );\n}","const NAME = 'Instructions'\n\nexport default {\n name: NAME,\n instructions: {\n yamlTags: {\n paragraph: [\n \"Add YAML tags\"\n ],\n example: [\n \"---\",\n `annotations_creators:`,\n `- no-annotation`,\n `language_creators:`,\n `- found`,\n `languages:`,\n `- en`,\n `licenses:`,\n `- unknown`,\n `multilinguality:`,\n `- monolingual`,\n `size_categories:`,\n `- 100K<n<1M`,\n `source_datasets:`,\n `- original`,\n `task_categories:`,\n `- question-answering`,\n `task_ids:`,\n `- abstractive-qa`,\n `- open-domain-qa`,\n `---`,\n ]\n },\n homepage: {\n paragraph: [\n \"Add homepage URL here if available (unless it's a GitHub repository)\"\n ],\n example: [\n \"[ELI5 homepage](https://facebookresearch.github.io/ELI5/explore.html)\"\n ]\n },\n repository: {\n paragraph: [\n \"If the dataset is hosted on github or has a github homepage, add URL here\"\n ],\n example: [\n \"[ELI5 repository](https://github.com/facebookresearch/ELI5)\"\n ]\n }, \n paper: {\n paragraph: [\n \"If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)\"\n ],\n example: [\n \"[ELI5: Long Form Question Answering](https://arxiv.org/abs/1907.09190)\"\n ]\n }, \n leaderboard: {\n paragraph: [\n \"If the dataset supports an active leaderboard, add link here\"\n ],\n example: [\n \"[N/A]\"\n ]\n }, \n contact: {\n paragraph: [\n \"If known, name and email of at least one person the reader can contact for questions about the dataset.\"\n ],\n example: [\n \"[Yacine Jernite](mailto:yacine@huggingface.co)\"\n ]\n }, \n datasetSummary: {\n paragraph: [\n \"Briefly summarize the dataset, its intended use and the supported tasks. Give an overview of how and why the dataset was created. The summary should explicitly mention the languages present in the dataset (possibly in broad terms, e.g. translations between several pairs of European languages), and describe the domain, topic, or genre covered.\"\n ],\n example: [\n \"The ELI5 dataset is an English-language dataset of questions and answers gathered from three subreddits were users ask factual questions requiring paragraph-length or longer answers. 
The dataset was created to support the task of open-domain long form abstractive question answering, and covers questions about general topics in its [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) subset, science in it [r/askscience](https://www.reddit.com/r/askscience/) subset, and History in its [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subset.\"\n ]\n },\n supportedTasks: {\n paragraph: [\n \"For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (repace the `task-category-tag` with an appropriate `other:other-task-name`).\",\n \"- `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION]. Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score. *[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name).\"\n ],\n example: [\n \"- `abstractive-qa`, `open-domain-qa`: The dataset can be used to train a model for Open Domain Long Form Question Answering. An LFQA model is presented with a non-factoid and asked to retrieve relevant information from a knowledge source (such as [Wikipedia](https://www.wikipedia.org/)), then use it to generate a multi-sentence answer. The model performance is measured by how high its [ROUGE](https://huggingface.co/metrics/rouge) score to the reference is. A [BART-based model](https://huggingface.co/yjernite/bart_eli5) with a [dense retriever](https://huggingface.co/yjernite/retribert-base-uncased) trained to draw information from [Wikipedia passages](https://huggingface.co/datasets/wiki_snippets) achieves a [ROUGE-L of 0.149](https://yjernite.github.io/lfqa.html#generation).\"\n ]\n },\n languages: {\n paragraph: [\n \"Provide a brief overview of the languages represented in the dataset. Describe relevant details about specifics of the language such as whether it is social media text, African American English,...\",\n \"When relevant, please provide [BCP-47 codes](https://tools.ietf.org/html/bcp47), which consist of a [primary language subtag](https://tools.ietf.org/html/bcp47#section-2.2.1), with a [script subtag](https://tools.ietf.org/html/bcp47#section-2.2.3) and/or [region subtag](https://tools.ietf.org/html/bcp47#section-2.2.4) if available.\"\n ],\n example: [\n \"The text in the dataset is in English, as spoken by Reddit users on the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits. The associated BCP-47 code is `en`.\"\n ]\n },\n dataInstances: {\n paragraph: [\n \"Provide an JSON-formatted example and brief description of a typical instance in the dataset. 
If available, provide a link to further examples.\",\n `\n {\n 'example_field': ...,\n ...\n }\n `,\n \"Provide any additional information that is not covered in the other sections about the data here. In particular describe any relationships between data points and if these relationships are made explicit.\",\n ],\n example: [\n \"A typical data point comprises a question, with a `title` containing the main question and a `selftext` which sometimes elaborates on it, and a list of answers from the forum sorted by the number of upvotes they obtained. Additionally, the URLs in each of the text fields have been extracted to respective lists and replaced by generic tokens in the text.\",\n \"An example from the ELI5 test set looks as follows:\",\n `{'q_id': '8houtx'`,\n `'title': 'Why does water heated to room temperature feel colder than the air around it?'`,\n `'selftext': ''`,\n `'document': ''`,\n `'subreddit': 'explainlikeimfive'`,\n `'answers': {'a_id': ['dylcnfk', 'dylcj49']`,\n `'text': [\"Water transfers heat more efficiently than air. When something feels cold it's because heat is being transferred from your skin to whatever you're touching. Since water absorbs the heat more readily than air, it feels colder.\",\n \"Air isn't as good at transferring heat compared to something like water or steel (sit on a room temperature steel bench vs. a room temperature wooden bench, and the steel one will feel more cold).\\n\\nWhen you feel cold, what you're feeling is heat being transferred out of you. If there is no breeze, you feel a certain way. If there's a breeze, you will get colder faster (because the moving air is pulling the heat away from you), and if you get into water, its quite good at pulling heat from you. Get out of the water and have a breeze blow on you while you're wet, all of the water starts evaporating, pulling even more heat from you.\"]`,\n `'score': [5, 2]}`,\n `'title_urls': {'url': []}`,\n `'selftext_urls': {'url': []}`,\n `'answers_urls': {'url': []}}`,\n ]\n },\n dataFields: {\n paragraph: [\n \"List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. 
If the datasets contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.\",\n \"- `example_field`: description of `example_field`\"\n ], \n example: [\n `- q_id: a string question identifier for each example, corresponding to its ID in the [Pushshift.io](https://files.pushshift.io/reddit/submissions/) Reddit submission dumps.`,\n `- subreddit: One of explainlikeimfive, askscience, or AskHistorians, indicating which subreddit the question came from`,\n `- title: title of the question, with URLs extracted and replaced by URL_n tokens`,\n `- title_urls: list of the extracted URLs, the nth element of the list was replaced by URL_n`,\n `- selftext: either an empty string or an elaboration of the question`,\n `- selftext_urls: similar to title_urls but for self_text`,\n `- answers: a list of answers, each answer has:`,\n `- a_id: a string answer identifier for each answer, corresponding to its ID in the [Pushshift.io](https://files.pushshift.io/reddit/comments/) Reddit comments dumps.`,\n `- text: the answer text with the URLs normalized`,\n `- score: the number of upvotes the answer had received when the dumps were created`,\n `- answers_urls: a list of the extracted URLs. All answers use the same list, the numbering of the normalization token continues across answer texts`,\n ]\n },\n dataSplits: {\n paragraph: [\n \"Describe and name the splits in the dataset if there are more than one.\",\n \"Describe any criteria for splitting the data, if used. If their are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.\",\n \"Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:\",\n `\tTain\tValid\tTest\n Input Sentences \t\n Average Sentence Length`,\n ],\n example: [\n \"The data is split into a training, validation and test set for each of the three subreddits. In order to avoid having duplicate questions in across sets, the `title` field of each of the questions were ranked by their tf-idf match to their nearest neighbor and the ones with the smallest value were used in the test and validation sets. The final split sizes are as follow:\",\n `\tTain\tValid\tTest\n r/explainlikeimfive examples\t272634\t9812\t24512\n r/askscience examples\t131778\t2281\t4462\n r/AskHistorians examples\t98525\t4901\t9764`,\n ]\n },\n curationRationale: {\n paragraph: [\n \"What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together?\",\n ],\n example: [\n \"ELI5 was built to provide a testbed for machines to learn how to answer more complex questions, which requires them to find and combine information in a coherent manner. The dataset was built by gathering questions that were asked by community members of three subreddits, including [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), along with the answers that were provided by other users. 
The [rules of the subreddit](https://www.reddit.com/r/explainlikeimfive/wiki/detailed_rules) make this data particularly well suited to training a model for abstractive question answering: the questions need to seek an objective explanation about well established facts, and the answers provided need to be understandable to a layperson without any particular knowledge domain.\"\n ]\n },\n dataCollection: {\n paragraph: [\n \"Describe the data collection process. Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.\",\n \"If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).\",\n \"If the data was modified or normalized after being collected (e.g. if the data is word-tokenized), describe the process and the tools used.\"\n ],\n example:[\n \"The data was obtained by filtering submissions and comments from the subreddits of interest from the XML dumps of the [Reddit forum](https://www.reddit.com/) hosted on [Pushshift.io](https://files.pushshift.io/reddit/).\",\n \"In order to further improve the quality of the selected examples, only questions with a score of at least 2 and at least one answer with a score of at least 2 were selected for the dataset. The dataset questions and answers span a period form August 2012 to August 2019.\"\n ]\n },\n sourceLanguage: {\n paragraph: [\n \"State whether the data was produced by humans or machine generated. Describe the people or systems who originally created the data.\",\n \"If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender.\",\n \"Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.\",\n \"Describe other people represented or mentioned in the data. Where possible, link to references for the information.\"\n ],\n example: [\n \"The language producers are users of the [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/), [r/askscience](https://www.reddit.com/r/askscience/), and [r/AskHistorians](https://www.reddit.com/r/AskHistorians/) subreddits between 2012 and 2019. No further demographic information was available from the data source.\"\n ]\n },\n annotations: {\n paragraph: [\n \"If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs.\"\n ],\n example: [\n \"The dataset does not contain any additional annotations.\"\n ]\n },\n annotationProcess: {\n paragraph: [\n \"If applicable, describe the annotation process and any tools used, or state otherwise. Describe the amount of data annotated, if not all. Describe or reference annotation guidelines provided to the annotators. If available, provide interannotator statistics. 
Describe any annotation validation processes.\"\n ],\n example: [\n \"[N/A]\"\n ]\n },\n annotators: {\n paragraph: [\n \"If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated.\",\n \"Describe the people or systems who originally created the annotations and their selection criteria if applicable.\",\n \"If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender.\",\n \"Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.\"\n ],\n example: [\n \"[N/A]\"\n ]\n },\n personalInformation: {\n paragraph: [\n \"State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).\",\n \"State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).\",\n \"If efforts were made to anonymize the data, describe the anonymization process.\"\n ],\n example: [\n \"The authors removed the speaker IDs from the [Pushshift.io](https://files.pushshift.io/reddit/) dumps but did not otherwise anonymize the data. Some of the questions and answers are about contemporary public figures or individuals who appeared in the news.\"\n ]\n },\n socialImpact: {\n paragraph: [\n \"The purpose of this dataset is to help develop better question answering systems.\",\n \"The statement should include both positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to people who are affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations.\",\n \"Please also mention in this section if the proposed dataset contains a *low-resource* or under-represented language.\"\n ],\n example: [\n \"The purpose of this dataset is to help develop better question answering systems.\",\n \"A system that succeeds at the supported task would be able to provide a coherent answer to even complex questions requiring a multi-step explanation, which is beyond the ability of even the larger existing models. 
The task is also thought as a test-bed for retrieval model which can show the users which source text was used in generating the answer and allow them to confirm the information provided to them.\",\n \"It should be noted however that the provided answers were written by Reddit users, an information which may be lost if models trained on it are deployed in down-stream applications and presented to users without context. The specific biases this may introduce are discussed in the next section.\"\n ]\n },\n biasesDiscussion: {\n paragraph: [\n \"Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact.\",\n \"For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic.\",\n \"If analyses have been run quantifying these biases, please add brief summaries and links to the studies here.\"\n ],\n example: [\n \"While Reddit hosts a number of thriving communities with high quality discussions, it is also widely known to have corners where sexism, hate, and harassment are significant issues. See for example the [recent post from Reddit founder u/spez](https://www.reddit.com/r/announcements/comments/gxas21/upcoming_changes_to_our_content_policy_our_board/) outlining some of the ways he thinks the website's historical policies have been responsible for this problem, [Adrienne Massanari's 2015 article on GamerGate](https://www.researchgate.net/publication/283848479_Gamergate_and_The_Fappening_How_Reddit's_algorithm_governance_and_culture_support_toxic_technocultures) and follow-up works, or a [2019 Wired article on misogyny on Reddit](https://www.wired.com/story/misogyny-reddit-research/).\",\n \"While there has been some recent work in the NLP community on *de-biasing* models (e.g. [Black is to Criminal as Caucasian is to Police: Detecting and Removing Multiclass Bias in Word Embeddings](https://arxiv.org/abs/1904.04047) for word embeddings trained specifically on Reddit data), this problem is far from solved, and the likelihood that a trained model might learn the biases present in the data remains a significant concern.\",\n `We still note some encouraging signs for all of these communities: [r/explainlikeimfive](https://www.reddit.com/r/explainlikeimfive/) and [r/askscience](https://www.reddit.com/r/askscience/) have similar structures and purposes, and [r/askscience](https://www.reddit.com/r/askscience/) was found in 2015 to show medium supportiveness and very low toxicity when compared to other subreddits (see a [hackerfall post](https://hackerfall.com/story/study-and-interactive-visualization-of-toxicity-in), [thecut.com write-up](https://www.thecut.com/2015/03/interactive-chart-of-reddits-toxicity.html) and supporting [data](https://chart-studio.plotly.com/~bsbell21/210/toxicity-vs-supportiveness-by-subreddit/#data)). Meanwhile, the [r/AskHistorians rules](https://www.reddit.com/r/AskHistorians/wiki/rules) mention that the admins will not tolerate \"_racism, sexism, or any other forms of bigotry_\". 
However, further analysis of whether and to what extent these rules reduce toxicity is still needed.`,\n \"We also note that given the audience of the Reddit website which is more broadly used in the US and Europe, the answers will likely present a Western perspectives, which is particularly important to note when dealing with historical topics.\"\n ]\n },\n limitations: {\n paragraph: [\n \"If studies of the datasets have outlined other limitations of the dataset, such as annotation artifacts, please outline and cite them here.\"\n ],\n example: [\n \"The answers provided in the dataset are represent the opinion of Reddit users. While these communities strive to be helpful, they should not be considered to represent a ground truth.\"\n ]\n },\n datasetCurators: {\n paragraph: [\n \"List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here.\"\n ],\n example: [\n \"The dataset was initially created by Angela Fan, Ethan Perez, Yacine Jernite, Jason Weston, Michael Auli, and David Grangier, during work done at Facebook AI Research (FAIR).\"\n ]\n },\n licensingInformation: {\n paragraph: [\n \"Provide the license and link to the license webpage if available.\"\n ],\n example: [\n \"The licensing status of the dataset hinges on the legal status of the [Pushshift.io](https://files.pushshift.io/reddit/) data which is unclear.\"\n ]\n },\n citationInformation: {\n paragraph: [\n \"Provide the [BibTex](http://www.bibtex.org/)-formatted reference for the dataset. For example:\",\n `\n @article{article_id,\n author = {Author List},\n title = {Dataset Paper Title},\n journal = {Publication Venue},\n year = {2525}\n }\n `,\n \"If the dataset has a [DOI](https://www.doi.org/), please provide it here.\"\n ],\n example: [\n `@inproceedings{eli5_lfqa`,\n `author = {Angela Fan and`,\n `Yacine Jernite and`,\n `Ethan Perez and`,\n `David Grangier and`,\n `Jason Weston and`,\n `Michael Auli},`,\n `editor = {Anna Korhonen and`,\n `David R. 
Traum and`,\n `Llu{\\'{\\i}}s M{\\`{a}}rquez},`,\n `title = {{ELI5:} Long Form Question Answering},`,\n `booktitle = {Proceedings of the 57th Conference of the Association for Computational`,\n `Linguistics, {ACL} 2019, Florence, Italy, July 28- August 2, 2019,`,\n `Volume 1: Long Papers},`,\n `pages = {3558--3567},`,\n `publisher = {Association for Computational Linguistics},`,\n `year = {2019},`,\n `url = {https://doi.org/10.18653/v1/p19-1346},`,\n `doi = {10.18653/v1/p19-1346}`,\n `}`,\n ]\n },\n }\n}","import React, { useState } from 'react';\nimport InputField from \"./InputField\"\nimport Section from \"./Section\"\nimport Instructions from './Instructions'\nimport ReactMarkdown from \"react-markdown\";\nimport {save} from 'save-file'\n\n\nfunction App() {\n\n const [fieldFocussed, setFieldFocussed] = useState()\n const [card, setCard] = useState({})\n const [tagsSection, setTagsSection] = useState(false)\n const [urlsSection, setUrlsSection] = useState(false)\n const [datasetDescriptionSection, setDatasetDescriptionSection] = useState(false)\n const [datasetStructureSection, setDatasetStructureSection] = useState(false)\n const [datasetCreationSection, setDatasetCreationSection] = useState(false)\n const [considerationsSection, setConsiderationsSection] = useState(false)\n const [additionalInformationSection, setAdditionalInformationSection] = useState(false)\n\n async function handleClick(e){\n setFieldFocussed(e.target.id)\n }\n\n async function handleChange(e){\n setCard({...card, [e.target.id]:e.currentTarget.value})\n }\n\n async function handleTagsSection(){\n setAdditionalInformationSection(false)\n setConsiderationsSection(false)\n setDatasetCreationSection(false)\n setDatasetStructureSection(false)\n setDatasetDescriptionSection(false)\n setUrlsSection(false)\n setTagsSection(!tagsSection)\n }\n\n async function handleUrlsSection(){\n setAdditionalInformationSection(false)\n setConsiderationsSection(false)\n setDatasetCreationSection(false)\n setDatasetStructureSection(false)\n setDatasetDescriptionSection(false)\n setTagsSection(false)\n setUrlsSection(!urlsSection)\n }\n\n async function handleDatasetDescriptionSection(){\n setAdditionalInformationSection(false)\n setConsiderationsSection(false)\n setDatasetCreationSection(false)\n setDatasetStructureSection(false)\n setTagsSection(false)\n setUrlsSection(false)\n setDatasetDescriptionSection(!datasetDescriptionSection)\n }\n\n async function handleDatasetStructureSection(){\n setAdditionalInformationSection(false)\n setConsiderationsSection(false)\n setDatasetCreationSection(false)\n setDatasetDescriptionSection(false)\n setTagsSection(false)\n setUrlsSection(false)\n setDatasetStructureSection(!datasetStructureSection)\n }\n\n async function handleDatasetCreationSection(){\n setAdditionalInformationSection(false)\n setConsiderationsSection(false)\n setDatasetDescriptionSection(false)\n setTagsSection(false)\n setUrlsSection(false)\n setDatasetStructureSection(false)\n setDatasetCreationSection(!datasetCreationSection)\n }\n\n async function handleConsiderationsSection(){\n setAdditionalInformationSection(false)\n setDatasetDescriptionSection(false)\n setTagsSection(false)\n setUrlsSection(false)\n setDatasetStructureSection(false)\n setDatasetCreationSection(false)\n setConsiderationsSection(!considerationsSection)\n }\n\n async function handleAdditionalInformationSection(){\n setDatasetDescriptionSection(false)\n setTagsSection(false)\n setUrlsSection(false)\n setDatasetStructureSection(false)\n 
setDatasetCreationSection(false)\n setConsiderationsSection(false)\n setAdditionalInformationSection(!additionalInformationSection)\n }\n\n async function exportFile(card){ \n var textTest = \n`${card.yamlTags ? card.yamlTags : \"[Needs More Information]\"}\n\n# Dataset Card for ${card.datasetName ? card.datasetName : \"[Needs More Information]\"}\n\n## Table of Contents\n- [Dataset Description](#dataset-description)\n - [Dataset Summary](#dataset-summary)\n - [Supported Tasks](#supported-tasks-and-leaderboards)\n - [Languages](#languages)\n- [Dataset Structure](#dataset-structure)\n - [Data Instances](#data-instances)\n - [Data Fields](#data-instances)\n - [Data Splits](#data-instances)\n- [Dataset Creation](#dataset-creation)\n - [Curation Rationale](#curation-rationale)\n - [Source Data](#source-data)\n - [Annotations](#annotations)\n - [Personal and Sensitive Information](#personal-and-sensitive-information)\n- [Considerations for Using the Data](#considerations-for-using-the-data)\n - [Social Impact of Dataset](#social-impact-of-dataset)\n - [Discussion of Biases](#discussion-of-biases)\n - [Other Known Limitations](#other-known-limitations)\n- [Additional Information](#additional-information)\n - [Dataset Curators](#dataset-curators)\n - [Licensing Information](#licensing-information)\n - [Citation Information](#citation-information)\n\n## Dataset Description\n\n- **Homepage:** ${card.homepage ? card.homepage : \"[Needs More Information]\"}\n- **Repository:** ${card.repository ? card.repository : \"[Needs More Information]\"}\n- **Paper:** ${card.paper ? card.paper : \"[Needs More Information]\"}\n- **Leaderboard:** ${card.leaderboard ? card.leaderboard : \"[Needs More Information]\"}\n- **Point of Contact:** ${card.contact ? card.contact : \"[Needs More Information]\"}\n\n### Dataset Summary\n\n${card.datasetSummary ? card.datasetSummary: \"[Needs More Information]\"}\n\n### Supported Tasks and Leaderboards\n\n${card.supportedTasks ? card.supportedTasks : \"[Needs More Information]\"}\n\n### Languages\n\n${card.languages ? card.languages : \"[Needs More Information]\"}\n\n## Dataset Structure\n\n### Data Instances\n\n${card.dataInstances ? card.dataInstances : \"[Needs More Information]\"}\n\n### Data Fields\n\n${card.dataFields ? card.dataFields : \"[Needs More Information]\"}\n\n### Data Splits\n\n${card.dataSplits ? card.dataSplits : \"[Needs More Information]\"}\n\n## Dataset Creation\n\n### Curation Rationale\n\n${card.curationRationale ? card.curationRationale : \"[Needs More Information]\"}\n\n### Source Data\n\n#### Initial Data Collection and Normalization\n\n${card.dataCollection ? card.dataCollection : \"[Needs More Information]\"}\n\n#### Who are the source language producers?\n\n${card.sourceLanguage ? card.sourceLanguage : \"[Needs More Information]\"}\n\n### Annotations\n\n#### Annotation process\n\n${card.annotationProcess ? card.annotationProcess : \"[Needs More Information]\"}\n\n#### Who are the annotators?\n\n${card.annotators ? card.annotators : \"[Needs More Information]\"}\n\n### Personal and Sensitive Information\n\n${card.personalInformation ? card.personalInformation : \"[Needs More Information]\"}\n\n## Considerations for Using the Data\n\n### Social Impact of Dataset\n\n${card.socialImpact ? card.socialImpact : \"[Needs More Information]\"}\n\n### Discussion of Biases\n\n${card.biasesDiscussion ? card.biasesDiscussion : \"[Needs More Information]\"}\n\n### Other Known Limitations\n\n${card.limitations ? 
card.limitations : \"[Needs More Information]\"}\n\n## Additional Information\n\n### Dataset Curators\n\n${card.datasetCurators ? card.datasetCurators : \"[Needs More Information]\"}\n\n### Licensing Information\n\n${card.licensingInformation ? card.licensingInformation : \"[Needs More Information]\"}\n\n### Citation Information\n\n${card.citationInformation ? card.citationInformation : \"[Needs More Information]\"}`\n await save(textTest, \"README.md\")\n }\n\n\n return (\n <div className=\"overflow-y-auto min-h-full font-sans\">\n <div className=\"grid grid-cols-12 gap-6\">\n <div className=\"col-span-8\">\n <div className=\"max-h-screen flex justify-center overflow-hidden bg-white\">\n <div className=\"xxs:max-w-xxs xs:max-w-xs md:max-w-2xl xl:max-w-4xl xxl:min-w-5xl py-8 px-4 sm:py-12 sm:px-6 lg:px-8\">\n <div className=\"text-center\">\n <p className=\"mt-1 text-4xl font-extrabold text-gray-700 sm:tracking-tight\">New Dataset Card for <input onChange={(e) => handleChange(e)} id=\"datasetName\" placeholder=\"dataset name\" maxLength=\"200\" className=\"ml-4 py-4 text-4xl text-gray-600 w-80 border border-solid border-gray-200 border-none h-10 rounded-md shadow\" /></p>\n <p className=\"max-w-xl mt-2 mx-auto text-lg text-gray-500\">Fill in the form below</p>\n </div>\n <div className=\"flex justify-end\">\n <button onClick={() => exportFile(card)} type=\"button\" className=\"cursor-pointer inline-flex items-center px-3 py-2 border border-solid border-gray-300 shadow-sm text-base leading-4 font-medium rounded-md text-gray-700 bg-white hover:bg-gray-50 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-gray-500\">\n Export\n </button>\n </div>\n <div className=\"shadow w-full xxs:max-h-xxs md:max-h-xs mb-32 lg:max-h-md xl:max-h-xl xxl:max-h-screen overflow-y-auto rounded-lg mt-4\">\n <div className=\"max-w-7xl px-4 divide-y-2 divide-gray-200 sm:px-6 lg:px-8\">\n <div className=\"\">\n <dl className=\"space-y-8 divide-y p-6 divide-gray-200\">\n \n <Section title={\"YAML Tags\"} section={tagsSection} handleSection={handleTagsSection} />\n \n {tagsSection &&\n <InputField value={card.yamlTags} title={\"YAML tags\"} id={\"yamlTags\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n }\n\n <Section title={\"Urls\"} section={urlsSection} handleSection={handleUrlsSection} />\n\n {urlsSection && \n <>\n <InputField value={card.homepage} title={\"Homepage\"} id={\"homepage\"} rows={2} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.repository} title={\"Repository\"} id={\"repository\"} rows={2} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.paper} title={\"Paper\"} id={\"paper\"} rows={2} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.leaderboard} title={\"Leaderboard\"} id={\"leaderboard\"} rows={2} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.contact} title={\"Point of Contact\"} id={\"contact\"} rows={2} handleClick={handleClick} handleChange={handleChange} />\n </>\n }\n\n <Section title={\"Dataset Description\"} section={datasetDescriptionSection} handleSection={handleDatasetDescriptionSection} />\n \n {datasetDescriptionSection &&\n <>\n <InputField value={card.datasetSummary} title={\"Dataset Summary\"} id={\"datasetSummary\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.supportedTasks} title={\"Supported Tasks and Leaderboards\"} id={\"supportedTasks\"} rows={6} 
handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.languages} title={\"Languages\"} id={\"languages\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n </>\n }\n\n <Section title={\"Dataset Structure\"} section={datasetStructureSection} handleSection={handleDatasetStructureSection} />\n\n {datasetStructureSection && \n <>\n <InputField value={card.dataInstances} title={\"Data Instances\"} id={\"dataInstances\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.dataFields} title={\"Data Fields\"} id={\"dataFields\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.dataSplits} title={\"Data Splits\"} id={\"dataSplits\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n </>\n }\n\n <Section title={\"Dataset Creation\"} section={datasetCreationSection} handleSection={handleDatasetCreationSection} />\n\n {datasetCreationSection &&\n <>\n <InputField value={card.curationRationale} title={\"Curation Rationale\"} id={\"curationRationale\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.dataCollection} title={\"Initial Data Collection and Normalization\"} id={\"dataCollection\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.sourceLanguage} title={\"Who are the source language producers?\"} id={\"sourceLanguage\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.annotationProcess} title={\"Annotation Process\"} id={\"annotationProcess\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.annotators} title={\"Who are the annotators?\"} id={\"annotators\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.personalInformation} title={\"Personal and Sensitive Information\"} id={\"personalInformation\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n </>\n }\n \n <Section title={\"Considerations for Using the Data\"} section={considerationsSection} handleSection={handleConsiderationsSection} />\n\n {considerationsSection &&\n <>\n <InputField value={card.socialImpact} title={\"Social Impact of Dataset\"} id={\"socialImpact\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.biasesDiscussion} title={\"Discussion of Biases\"} id={\"biasesDiscussion\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField value={card.limitations} title={\"Other Known Limitations\"} id={\"limitations\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n </>\n }\n\n <Section title={\"Additional Information\"} section={additionalInformationSection} handleSection={handleAdditionalInformationSection} />\n \n {additionalInformationSection &&\n <>\n <InputField title={\"Dataset Curators\"} id={\"datasetCurators\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField title={\"Licensing Information\"} id={\"licensingInformation\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n <InputField title={\"Citation Information\"} id={\"citationInformation\"} rows={6} handleClick={handleClick} handleChange={handleChange} />\n </>\n }\n \n </dl>\n </div>\n </div>\n </div> \n </div>\n </div>\n </div>\n <div className=\"col-span-4\">\n <div className=\"h-screen flex overflow-hidden bg-gray-100\">\n <div className=\"max-w-7xl 
mx-auto w-full py-8 px-4 sm:py-12\">\n <div className=\"text-center\">\n <p className=\"mt-1 text-4xl font-extrabold text-gray-700 sm:tracking-tight\">Information</p>\n {!fieldFocussed &&\n <p className=\"max-w-xl mt-5 mx-auto text-lg text-gray-500\">Click on a field to see instructions & example</p>\n }\n </div>\n {fieldFocussed &&\n <div className=\"max-w-7xl xxs:max-h-xxs md:max-h-xs mb-32 lg:max-h-md xl:max-h-xl xxl:max-h-screen overflow-y-auto text-left mx-auto py-12 px-4 divide-y-2 divide-gray-200 sm:px-6 lg:py-16 lg:px-8\">\n <div className=\"mt-12\">\n <dl className=\"space-y-8 divide-gray-200 text-gray-600 text-left\">\n <p className=\"mt-1 text-xl font-extrabold text-gray-700 sm:tracking-tight\">Instructions</p>\n {Instructions.instructions[fieldFocussed] && Instructions.instructions[fieldFocussed].paragraph.map((para) => (\n <div key={para}>\n <ReactMarkdown source={para}\n renderers={{link: props => <a href={props.href} target=\"_blank\">{props.children}</a>}}\n /> \n </div>\n ))}\n </dl>\n </div>\n <div className=\"mt-12\">\n <dl className=\"space-y-8 divide-gray-200 text-gray-600 text-left\">\n <p className=\"mt-1 text-xl font-extrabold text-gray-700 sm:tracking-tight\"><ReactMarkdown renderers={{link: props => <a href={props.href} target=\"_blank\">{props.children}</a>}} source={\"Example from the [ELI5 dataset card](https://github.com/huggingface/datasets/blob/master/datasets/eli5/README.md)\"}/></p>\n <div className=\"\">\n {Instructions.instructions[fieldFocussed] && Instructions.instructions[fieldFocussed].example.map((ex, index) => (\n <div className=\"mt-2\" key={ex+index}>\n {ex}\n </div>\n ))}\n </div>\n </dl>\n </div> \n </div>\n }\n <div className=\"absolute bottom-0 text-xs left-0 ml-4 text-gray-500\">\n developed by\n <a className=\"ml-1 no-underline text-gray-500\" href={\"https://huggingface.co/evrardts\"} target=\"_blank\">\n Evrard t'Serstevens\n </a>\n </div> \n </div> \n </div>\n </div>\n </div> \n <style>{`\n .borders {\n border-bottom: solid 1px;\n border-color: #e2e8f0;\n }\n `}</style>\n </div>\n );\n}\n\nexport default App;\n","const reportWebVitals = onPerfEntry => {\n if (onPerfEntry && onPerfEntry instanceof Function) {\n import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {\n getCLS(onPerfEntry);\n getFID(onPerfEntry);\n getFCP(onPerfEntry);\n getLCP(onPerfEntry);\n getTTFB(onPerfEntry);\n });\n }\n};\n\nexport default reportWebVitals;\n","import React from 'react';\nimport ReactDOM from 'react-dom';\nimport './index.css';\nimport App from './App';\nimport reportWebVitals from './reportWebVitals';\n\nReactDOM.render(\n <React.StrictMode>\n <App />\n </React.StrictMode>,\n document.getElementById('root')\n);\n\n// If you want to start measuring performance in your app, pass a function\n// to log results (for example: reportWebVitals(console.log))\n// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals\nreportWebVitals();\n"],"sourceRoot":""}
build/static/js/runtime-main.73e65ee8.js ADDED
@@ -0,0 +1,2 @@
1
+ !function(e){function t(t){for(var n,u,i=t[0],c=t[1],l=t[2],s=0,p=[];s<i.length;s++)u=i[s],Object.prototype.hasOwnProperty.call(o,u)&&o[u]&&p.push(o[u][0]),o[u]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(t);p.length;)p.shift()();return a.push.apply(a,l||[]),r()}function r(){for(var e,t=0;t<a.length;t++){for(var r=a[t],n=!0,i=1;i<r.length;i++){var c=r[i];0!==o[c]&&(n=!1)}n&&(a.splice(t--,1),e=u(u.s=r[0]))}return e}var n={},o={1:0},a=[];function u(t){if(n[t])return n[t].exports;var r=n[t]={i:t,l:!1,exports:{}};return e[t].call(r.exports,r,r.exports,u),r.l=!0,r.exports}u.e=function(e){var t=[],r=o[e];if(0!==r)if(r)t.push(r[2]);else{var n=new Promise((function(t,n){r=o[e]=[t,n]}));t.push(r[2]=n);var a,i=document.createElement("script");i.charset="utf-8",i.timeout=120,u.nc&&i.setAttribute("nonce",u.nc),i.src=function(e){return u.p+"static/js/"+({}[e]||e)+"."+{3:"523cfdab"}[e]+".chunk.js"}(e);var c=new Error;a=function(t){i.onerror=i.onload=null,clearTimeout(l);var r=o[e];if(0!==r){if(r){var n=t&&("load"===t.type?"missing":t.type),a=t&&t.target&&t.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+a+")",c.name="ChunkLoadError",c.type=n,c.request=a,r[1](c)}o[e]=void 0}};var l=setTimeout((function(){a({type:"timeout",target:i})}),12e4);i.onerror=i.onload=a,document.head.appendChild(i)}return Promise.all(t)},u.m=e,u.c=n,u.d=function(e,t,r){u.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},u.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},u.t=function(e,t){if(1&t&&(e=u(e)),8&t)return e;if(4&t&&"object"===typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(u.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)u.d(r,n,function(t){return e[t]}.bind(null,n));return r},u.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return u.d(t,"a",t),t},u.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},u.p="./",u.oe=function(e){throw console.error(e),e};var i=this.webpackJsonpdatasetcard=this.webpackJsonpdatasetcard||[],c=i.push.bind(i);i.push=t,i=i.slice();for(var l=0;l<i.length;l++)t(i[l]);var f=c;r()}([]);
2
+ //# sourceMappingURL=runtime-main.73e65ee8.js.map
build/static/js/runtime-main.73e65ee8.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../webpack/bootstrap"],"names":["webpackJsonpCallback","data","moduleId","chunkId","chunkIds","moreModules","executeModules","i","resolves","length","Object","prototype","hasOwnProperty","call","installedChunks","push","modules","parentJsonpFunction","shift","deferredModules","apply","checkDeferredModules","result","deferredModule","fulfilled","j","depId","splice","__webpack_require__","s","installedModules","1","exports","module","l","e","promises","installedChunkData","promise","Promise","resolve","reject","onScriptComplete","script","document","createElement","charset","timeout","nc","setAttribute","src","p","jsonpScriptSrc","error","Error","event","onerror","onload","clearTimeout","chunk","errorType","type","realSrc","target","message","name","request","undefined","setTimeout","head","appendChild","all","m","c","d","getter","o","defineProperty","enumerable","get","r","Symbol","toStringTag","value","t","mode","__esModule","ns","create","key","bind","n","object","property","oe","err","console","jsonpArray","this","oldJsonpFunction","slice"],"mappings":"aACE,SAASA,EAAqBC,GAQ7B,IAPA,IAMIC,EAAUC,EANVC,EAAWH,EAAK,GAChBI,EAAcJ,EAAK,GACnBK,EAAiBL,EAAK,GAIHM,EAAI,EAAGC,EAAW,GACpCD,EAAIH,EAASK,OAAQF,IACzBJ,EAAUC,EAASG,GAChBG,OAAOC,UAAUC,eAAeC,KAAKC,EAAiBX,IAAYW,EAAgBX,IACpFK,EAASO,KAAKD,EAAgBX,GAAS,IAExCW,EAAgBX,GAAW,EAE5B,IAAID,KAAYG,EACZK,OAAOC,UAAUC,eAAeC,KAAKR,EAAaH,KACpDc,EAAQd,GAAYG,EAAYH,IAKlC,IAFGe,GAAqBA,EAAoBhB,GAEtCO,EAASC,QACdD,EAASU,OAATV,GAOD,OAHAW,EAAgBJ,KAAKK,MAAMD,EAAiBb,GAAkB,IAGvDe,IAER,SAASA,IAER,IADA,IAAIC,EACIf,EAAI,EAAGA,EAAIY,EAAgBV,OAAQF,IAAK,CAG/C,IAFA,IAAIgB,EAAiBJ,EAAgBZ,GACjCiB,GAAY,EACRC,EAAI,EAAGA,EAAIF,EAAed,OAAQgB,IAAK,CAC9C,IAAIC,EAAQH,EAAeE,GACG,IAA3BX,EAAgBY,KAAcF,GAAY,GAE3CA,IACFL,EAAgBQ,OAAOpB,IAAK,GAC5Be,EAASM,EAAoBA,EAAoBC,EAAIN,EAAe,KAItE,OAAOD,EAIR,IAAIQ,EAAmB,GAKnBhB,EAAkB,CACrBiB,EAAG,GAGAZ,EAAkB,GAQtB,SAASS,EAAoB1B,GAG5B,GAAG4B,EAAiB5B,GACnB,OAAO4B,EAAiB5B,GAAU8B,QAGnC,IAAIC,EAASH,EAAiB5B,GAAY,CACzCK,EAAGL,EACHgC,GAAG,EACHF,QAAS,IAUV,OANAhB,EAAQd,GAAUW,KAAKoB,EAAOD,QAASC,EAAQA,EAAOD,QAASJ,GAG/DK,EAAOC,GAAI,EAGJD,EAAOD,QAKfJ,EAAoBO,EAAI,SAAuBhC,GAC9C,IAAIiC,EAAW,GAKXC,EAAqBvB,EAAgBX,GACzC,GAA0B,IAAvBkC,EAGF,GAAGA,EACFD,EAASrB,KAAKsB,EAAmB,QAC3B,CAEN,IAAIC,EAAU,IAAIC,SAAQ,SAASC,EAASC,GAC3CJ,EAAqBvB,EAAgBX,GAAW,CAACqC,EAASC,MAE3DL,EAASrB,KAAKsB,EAAmB,GAAKC,GAGtC,IACII,EADAC,EAASC,SAASC,cAAc,UAGpCF,EAAOG,QAAU,QACjBH,EAAOI,QAAU,IACbnB,EAAoBoB,IACvBL,EAAOM,aAAa,QAASrB,EAAoBoB,IAElDL,EAAOO,IA1DV,SAAwB/C,GACvB,OAAOyB,EAAoBuB,EAAI,cAAgB,GAAGhD,IAAUA,GAAW,IAAM,CAAC,EAAI,YAAYA,GAAW,YAyD1FiD,CAAejD,GAG5B,IAAIkD,EAAQ,IAAIC,MAChBZ,EAAmB,SAAUa,GAE5BZ,EAAOa,QAAUb,EAAOc,OAAS,KACjCC,aAAaX,GACb,IAAIY,EAAQ7C,EAAgBX,GAC5B,GAAa,IAAVwD,EAAa,CACf,GAAGA,EAAO,CACT,IAAIC,EAAYL,IAAyB,SAAfA,EAAMM,KAAkB,UAAYN,EAAMM,MAChEC,EAAUP,GAASA,EAAMQ,QAAUR,EAAMQ,OAAOb,IACpDG,EAAMW,QAAU,iBAAmB7D,EAAU,cAAgByD,EAAY,KAAOE,EAAU,IAC1FT,EAAMY,KAAO,iBACbZ,EAAMQ,KAAOD,EACbP,EAAMa,QAAUJ,EAChBH,EAAM,GAAGN,GAEVvC,EAAgBX,QAAWgE,IAG7B,IAAIpB,EAAUqB,YAAW,WACxB1B,EAAiB,CAAEmB,KAAM,UAAWE,OAAQpB,MAC1C,MACHA,EAAOa,QAAUb,EAAOc,OAASf,EACjCE,SAASyB,KAAKC,YAAY3B,GAG5B,OAAOJ,QAAQgC,IAAInC,IAIpBR,EAAoB4C,EAAIxD,EAGxBY,EAAoB6C,EAAI3C,EAGxBF,EAAoB8C,EAAI,SAAS1C,EAASiC,EAAMU,GAC3C/C,EAAoBgD,EAAE5C,EAASiC,IAClCvD,OAAOmE,eAAe7C,EAASiC,EAAM,CAAEa,YAAY,EAAMC,IAAKJ,KAKhE/C,EAAoBoD,EAAI,SAAShD,GACX,qBAAXiD,QAA0BA,OAAOC,aAC1CxE,OAAOmE,eAAe7C,EAASiD,OAAOC,YAAa,CAAEC,MAAO,WAE7DzE,OAAOmE,eAAe7C,EAAS,aAAc,CAAEmD,OAAO,KAQvDvD,EAAoBwD,EAAI,SAASD,EAAOE,GAEvC,GADU,EAAPA,IAAUF,EAAQvD,EAAoBuD,IAC/B,EAAPE,EAAU,
OAAOF,EACpB,GAAW,EAAPE,GAA8B,kBAAVF,GAAsBA,GAASA,EAAMG,WAAY,OAAOH,EAChF,IAAII,EAAK7E,OAAO8E,OAAO,MAGvB,GAFA5D,EAAoBoD,EAAEO,GACtB7E,OAAOmE,eAAeU,EAAI,UAAW,CAAET,YAAY,EAAMK,MAAOA,IACtD,EAAPE,GAA4B,iBAATF,EAAmB,IAAI,IAAIM,KAAON,EAAOvD,EAAoB8C,EAAEa,EAAIE,EAAK,SAASA,GAAO,OAAON,EAAMM,IAAQC,KAAK,KAAMD,IAC9I,OAAOF,GAIR3D,EAAoB+D,EAAI,SAAS1D,GAChC,IAAI0C,EAAS1C,GAAUA,EAAOqD,WAC7B,WAAwB,OAAOrD,EAAgB,SAC/C,WAA8B,OAAOA,GAEtC,OADAL,EAAoB8C,EAAEC,EAAQ,IAAKA,GAC5BA,GAIR/C,EAAoBgD,EAAI,SAASgB,EAAQC,GAAY,OAAOnF,OAAOC,UAAUC,eAAeC,KAAK+E,EAAQC,IAGzGjE,EAAoBuB,EAAI,KAGxBvB,EAAoBkE,GAAK,SAASC,GAA2B,MAApBC,QAAQ3C,MAAM0C,GAAYA,GAEnE,IAAIE,EAAaC,KAA8B,wBAAIA,KAA8B,yBAAK,GAClFC,EAAmBF,EAAWlF,KAAK2E,KAAKO,GAC5CA,EAAWlF,KAAOf,EAClBiG,EAAaA,EAAWG,QACxB,IAAI,IAAI7F,EAAI,EAAGA,EAAI0F,EAAWxF,OAAQF,IAAKP,EAAqBiG,EAAW1F,IAC3E,IAAIU,EAAsBkF,EAI1B9E,I","file":"static/js/runtime-main.73e65ee8.js","sourcesContent":[" \t// install a JSONP callback for chunk loading\n \tfunction webpackJsonpCallback(data) {\n \t\tvar chunkIds = data[0];\n \t\tvar moreModules = data[1];\n \t\tvar executeModules = data[2];\n\n \t\t// add \"moreModules\" to the modules object,\n \t\t// then flag all \"chunkIds\" as loaded and fire callback\n \t\tvar moduleId, chunkId, i = 0, resolves = [];\n \t\tfor(;i < chunkIds.length; i++) {\n \t\t\tchunkId = chunkIds[i];\n \t\t\tif(Object.prototype.hasOwnProperty.call(installedChunks, chunkId) && installedChunks[chunkId]) {\n \t\t\t\tresolves.push(installedChunks[chunkId][0]);\n \t\t\t}\n \t\t\tinstalledChunks[chunkId] = 0;\n \t\t}\n \t\tfor(moduleId in moreModules) {\n \t\t\tif(Object.prototype.hasOwnProperty.call(moreModules, moduleId)) {\n \t\t\t\tmodules[moduleId] = moreModules[moduleId];\n \t\t\t}\n \t\t}\n \t\tif(parentJsonpFunction) parentJsonpFunction(data);\n\n \t\twhile(resolves.length) {\n \t\t\tresolves.shift()();\n \t\t}\n\n \t\t// add entry modules from loaded chunk to deferred list\n \t\tdeferredModules.push.apply(deferredModules, executeModules || []);\n\n \t\t// run deferred modules when all chunks ready\n \t\treturn checkDeferredModules();\n \t};\n \tfunction checkDeferredModules() {\n \t\tvar result;\n \t\tfor(var i = 0; i < deferredModules.length; i++) {\n \t\t\tvar deferredModule = deferredModules[i];\n \t\t\tvar fulfilled = true;\n \t\t\tfor(var j = 1; j < deferredModule.length; j++) {\n \t\t\t\tvar depId = deferredModule[j];\n \t\t\t\tif(installedChunks[depId] !== 0) fulfilled = false;\n \t\t\t}\n \t\t\tif(fulfilled) {\n \t\t\t\tdeferredModules.splice(i--, 1);\n \t\t\t\tresult = __webpack_require__(__webpack_require__.s = deferredModule[0]);\n \t\t\t}\n \t\t}\n\n \t\treturn result;\n \t}\n\n \t// The module cache\n \tvar installedModules = {};\n\n \t// object to store loaded and loading chunks\n \t// undefined = chunk not loaded, null = chunk preloaded/prefetched\n \t// Promise = chunk loading, 0 = chunk loaded\n \tvar installedChunks = {\n \t\t1: 0\n \t};\n\n \tvar deferredModules = [];\n\n \t// script path function\n \tfunction jsonpScriptSrc(chunkId) {\n \t\treturn __webpack_require__.p + \"static/js/\" + ({}[chunkId]||chunkId) + \".\" + {\"3\":\"523cfdab\"}[chunkId] + \".chunk.js\"\n \t}\n\n \t// The require function\n \tfunction __webpack_require__(moduleId) {\n\n \t\t// Check if module is in cache\n \t\tif(installedModules[moduleId]) {\n \t\t\treturn installedModules[moduleId].exports;\n \t\t}\n \t\t// Create a new module (and put it into the cache)\n \t\tvar module = installedModules[moduleId] = {\n \t\t\ti: moduleId,\n \t\t\tl: false,\n \t\t\texports: {}\n \t\t};\n\n \t\t// 
Execute the module function\n \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n \t\t// Flag the module as loaded\n \t\tmodule.l = true;\n\n \t\t// Return the exports of the module\n \t\treturn module.exports;\n \t}\n\n \t// This file contains only the entry chunk.\n \t// The chunk loading function for additional chunks\n \t__webpack_require__.e = function requireEnsure(chunkId) {\n \t\tvar promises = [];\n\n\n \t\t// JSONP chunk loading for javascript\n\n \t\tvar installedChunkData = installedChunks[chunkId];\n \t\tif(installedChunkData !== 0) { // 0 means \"already installed\".\n\n \t\t\t// a Promise means \"currently loading\".\n \t\t\tif(installedChunkData) {\n \t\t\t\tpromises.push(installedChunkData[2]);\n \t\t\t} else {\n \t\t\t\t// setup Promise in chunk cache\n \t\t\t\tvar promise = new Promise(function(resolve, reject) {\n \t\t\t\t\tinstalledChunkData = installedChunks[chunkId] = [resolve, reject];\n \t\t\t\t});\n \t\t\t\tpromises.push(installedChunkData[2] = promise);\n\n \t\t\t\t// start chunk loading\n \t\t\t\tvar script = document.createElement('script');\n \t\t\t\tvar onScriptComplete;\n\n \t\t\t\tscript.charset = 'utf-8';\n \t\t\t\tscript.timeout = 120;\n \t\t\t\tif (__webpack_require__.nc) {\n \t\t\t\t\tscript.setAttribute(\"nonce\", __webpack_require__.nc);\n \t\t\t\t}\n \t\t\t\tscript.src = jsonpScriptSrc(chunkId);\n\n \t\t\t\t// create error before stack unwound to get useful stacktrace later\n \t\t\t\tvar error = new Error();\n \t\t\t\tonScriptComplete = function (event) {\n \t\t\t\t\t// avoid mem leaks in IE.\n \t\t\t\t\tscript.onerror = script.onload = null;\n \t\t\t\t\tclearTimeout(timeout);\n \t\t\t\t\tvar chunk = installedChunks[chunkId];\n \t\t\t\t\tif(chunk !== 0) {\n \t\t\t\t\t\tif(chunk) {\n \t\t\t\t\t\t\tvar errorType = event && (event.type === 'load' ? 
'missing' : event.type);\n \t\t\t\t\t\t\tvar realSrc = event && event.target && event.target.src;\n \t\t\t\t\t\t\terror.message = 'Loading chunk ' + chunkId + ' failed.\\n(' + errorType + ': ' + realSrc + ')';\n \t\t\t\t\t\t\terror.name = 'ChunkLoadError';\n \t\t\t\t\t\t\terror.type = errorType;\n \t\t\t\t\t\t\terror.request = realSrc;\n \t\t\t\t\t\t\tchunk[1](error);\n \t\t\t\t\t\t}\n \t\t\t\t\t\tinstalledChunks[chunkId] = undefined;\n \t\t\t\t\t}\n \t\t\t\t};\n \t\t\t\tvar timeout = setTimeout(function(){\n \t\t\t\t\tonScriptComplete({ type: 'timeout', target: script });\n \t\t\t\t}, 120000);\n \t\t\t\tscript.onerror = script.onload = onScriptComplete;\n \t\t\t\tdocument.head.appendChild(script);\n \t\t\t}\n \t\t}\n \t\treturn Promise.all(promises);\n \t};\n\n \t// expose the modules object (__webpack_modules__)\n \t__webpack_require__.m = modules;\n\n \t// expose the module cache\n \t__webpack_require__.c = installedModules;\n\n \t// define getter function for harmony exports\n \t__webpack_require__.d = function(exports, name, getter) {\n \t\tif(!__webpack_require__.o(exports, name)) {\n \t\t\tObject.defineProperty(exports, name, { enumerable: true, get: getter });\n \t\t}\n \t};\n\n \t// define __esModule on exports\n \t__webpack_require__.r = function(exports) {\n \t\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n \t\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n \t\t}\n \t\tObject.defineProperty(exports, '__esModule', { value: true });\n \t};\n\n \t// create a fake namespace object\n \t// mode & 1: value is a module id, require it\n \t// mode & 2: merge all properties of value into the ns\n \t// mode & 4: return value when already ns object\n \t// mode & 8|1: behave like require\n \t__webpack_require__.t = function(value, mode) {\n \t\tif(mode & 1) value = __webpack_require__(value);\n \t\tif(mode & 8) return value;\n \t\tif((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;\n \t\tvar ns = Object.create(null);\n \t\t__webpack_require__.r(ns);\n \t\tObject.defineProperty(ns, 'default', { enumerable: true, value: value });\n \t\tif(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));\n \t\treturn ns;\n \t};\n\n \t// getDefaultExport function for compatibility with non-harmony modules\n \t__webpack_require__.n = function(module) {\n \t\tvar getter = module && module.__esModule ?\n \t\t\tfunction getDefault() { return module['default']; } :\n \t\t\tfunction getModuleExports() { return module; };\n \t\t__webpack_require__.d(getter, 'a', getter);\n \t\treturn getter;\n \t};\n\n \t// Object.prototype.hasOwnProperty.call\n \t__webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };\n\n \t// __webpack_public_path__\n \t__webpack_require__.p = \"./\";\n\n \t// on error function for async loading\n \t__webpack_require__.oe = function(err) { console.error(err); throw err; };\n\n \tvar jsonpArray = this[\"webpackJsonpdatasetcard\"] = this[\"webpackJsonpdatasetcard\"] || [];\n \tvar oldJsonpFunction = jsonpArray.push.bind(jsonpArray);\n \tjsonpArray.push = webpackJsonpCallback;\n \tjsonpArray = jsonpArray.slice();\n \tfor(var i = 0; i < jsonpArray.length; i++) webpackJsonpCallback(jsonpArray[i]);\n \tvar parentJsonpFunction = oldJsonpFunction;\n\n\n \t// run deferred modules from other chunks\n \tcheckDeferredModules();\n"],"sourceRoot":""}