# -*- coding: utf-8 -*-
"""yelp_dataset.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/14UtK4YCjMSx4cVbUb9NBRHviWZg07dtY
"""

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Yelp Open Dataset: business metadata joined with user reviews."""

import json
import logging

import datasets
import pandas as pd

# TODO: Replace with a real BibTeX citation, e.g. from the dataset homepage.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
This dataset encompasses a wealth of information from the Yelp platform, \
detailing user reviews, business ratings, and operational specifics across \
a diverse array of local establishments.
"""

_HOMEPAGE = "https://www.yelp.com/dataset/download"

_LICENSE = "https://s3-media0.fl.yelpcdn.com/assets/srv0/engineering_pages/f64cb2d3efcc/assets/vendor/Dataset_User_Agreement.pdf"

# The HuggingFace Datasets library doesn't host the datasets but only points
# to the original files.
_URL = "https://yelpdata.s3.us-west-2.amazonaws.com/"
_URLS = {
    "business": _URL + "yelp_academic_dataset_business.json",
    "review": _URL + "yelp_academic_dataset_review.json",
}


class YelpDataset(datasets.GeneratorBasedBuilder):
    """Yelp businesses merged with their reviews, filtered to restaurants."""

    _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "business_id": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "address": datasets.Value("string"),
                    "city": datasets.Value("string"),
                    "state": datasets.Value("string"),
                    "postal_code": datasets.Value("string"),
                    # "float32" is the explicit dtype name; the bare "float"
                    # alias is not accepted by every `datasets` release.
                    "latitude": datasets.Value("float32"),
                    "longitude": datasets.Value("float32"),
                    "stars_x": datasets.Value("float32"),
                    "review_count": datasets.Value("float32"),
                    "is_open": datasets.Value("float32"),
                    "categories": datasets.Value("string"),
                    "hours": datasets.Value("string"),
                    "review_id": datasets.Value("string"),
                    "user_id": datasets.Value("string"),
                    "stars_y": datasets.Value("float32"),
                    "useful": datasets.Value("float32"),
                    "funny": datasets.Value("float32"),
                    "cool": datasets.Value("float32"),
                    "text": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "attributes": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: each row carries both the review
            # text and the rating, and the prediction task is left to the
            # consumer.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
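    # The generated script stops short of `_split_generators`, but
    # `GeneratorBasedBuilder` needs it to download the files and route their
    # local paths into `_generate_examples`. A minimal sketch, assuming a
    # single TRAIN split built from the two S3 files in `_URLS`:
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # Download and cache both JSON files; the result mirrors the _URLS
        # dict, i.e. {"business": <local path>, "review": <local path>}.
        downloaded_files = dl_manager.download_and_extract(self._URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded_files},
            ),
        ]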
    def _generate_examples(self, filepaths):
        """Yields one example per (restaurant, review) pair."""
        logging.info("Generating examples from = %s", filepaths)

        # Load the newline-delimited JSON files into pandas DataFrames.
        # `convert_dates=False` keeps the review 'date' column as a plain
        # string, matching the declared feature type.
        business_df = pd.read_json(filepaths["business"], lines=True)
        review_df = pd.read_json(filepaths["review"], lines=True, convert_dates=False)

        # Merge on 'business_id'; the overlapping 'stars' column is suffixed
        # '_x' (business average) and '_y' (individual review).
        merged_df = pd.merge(business_df, review_df, on="business_id")

        # Keep only entries whose 'categories' field contains 'Restaurants'.
        filtered_df = merged_df[merged_df["categories"].str.contains("Restaurants", na=False)]

        # Optional: persist the merged table as CSV.
        # filtered_df.to_csv("filtered_dataset.csv", index=False)

        for index, row in filtered_df.iterrows():
            example = row.to_dict()
            for key, value in example.items():
                # 'attributes' and 'hours' arrive as nested dicts, but the
                # schema declares them as strings, so serialize them to JSON.
                if isinstance(value, (dict, list)):
                    example[key] = json.dumps(value)
                # Normalize remaining missing values to None so they encode
                # cleanly as nulls.
                elif pd.isnull(value):
                    example[key] = None
            yield index, example
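# A minimal usage sketch (assumes this script is saved as `yelp_dataset.py`
# next to the caller; depending on your `datasets` version you may also need
# to pass `trust_remote_code=True` to run a local loading script):
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads both JSON files, merges them, and builds the TRAIN split.
    dataset = load_dataset("yelp_dataset.py", split="train")
    print(dataset[0]["name"], dataset[0]["stars_y"])
    print(dataset[0]["text"][:100])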