{ "cells": [ { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Explore Dataset: docugami/dfm-csl-large-benchmark\n", "\n", "See [README](./README.md) for more information" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "DATASET_NAME=\"Docugami/dfm-csl-large-benchmark\"" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# install dependencies and get a token\n", "!pip install datasets huggingface_hub ipywidgets --quiet\n", "\n", "from huggingface_hub import notebook_login\n", "notebook_login()\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Explore Dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from datasets import load_dataset\n", "\n", "# Download and check out metadata for dataset\n", "dataset = load_dataset(DATASET_NAME, split=\"eval\")\n", "dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# let's look at one of the rows\n", "dataset[-1]" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Update Dataset from Github\n", "Note: this requires write access to the dataset." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# download latest CSVs from github\n", "!rm -rf ./temp\n", "!wget -P ./temp/eval https://raw.githubusercontent.com/docugami/DFM-benchmarks/main/data/annotations/eval/CSL-Large.csv\n", "!wget -P ./temp/train https://raw.githubusercontent.com/docugami/DFM-benchmarks/main/data/annotations/train/CSL-Large.csv" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from datasets import Dataset\n", "\n", "# Load the data from the csv files\n", "eval_set = Dataset.from_csv('./temp/eval/CSL-Large.csv')\n", "train_set = Dataset.from_csv('./temp/train/CSL-Large.csv')\n", "\n", "# Push the dataset to the Hugging Face Hub\n", "eval_set.push_to_hub(DATASET_NAME, split=\"eval\")\n", "train_set.push_to_hub(DATASET_NAME, split=\"train\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }