Dataset schema (column, dtype, value range or class count):

repo_name            stringclasses  5 values
repo_url             stringclasses  5 values
repo_description     stringclasses  3 values
repo_stars           int64          6 to 15.8k
repo_forks           int64          192 to 3.6k
repo_last_updated    stringclasses  5 values
repo_created_at      stringclasses  5 values
repo_size            int64          513 to 2.13k
repo_license         stringclasses  4 values
language             stringclasses  2 values
text                 stringlengths  0 to 27.5k
avg_line_length      float64        0 to 74.3
max_line_length      int64          0 to 652
alphnanum_fraction   float64        0 to 0.8
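The last three columns are simple statistics over the text field. A minimal sketch of how they are presumably computed; the exact definitions are assumptions, not taken from the dataset itself:

def text_stats(text: str) -> dict:
    # Assumed definitions: mean/max of per-line lengths and the share of
    # alphanumeric characters; the dataset may compute these slightly differently.
    lines = text.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphnanum_fraction": (
            sum(ch.isalnum() for ch in text) / len(text) if text else 0.0
        ),
    }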
sfguide-data-engineering-with-snowpark-python
https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python
null
53
2,339
2023-11-18 06:33:34+00:00
2023-01-23 16:06:45+00:00
513
Apache License 2.0
Python
0
0
0
sfguide-data-engineering-with-snowpark-python
https://github.com/Snowflake-Labs/sfguide-data-engineering-with-snowpark-python
null
53
2,339
2023-11-18 06:33:34+00:00
2023-01-23 16:06:45+00:00
513
Apache License 2.0
Python
from snowflake.snowpark import Session
import os
from typing import Optional


# Class to store a singleton connection option
class SnowflakeConnection(object):
    _connection = None

    @property
    def connection(self) -> Optional[Session]:
        return type(self)._connection

    @connection.setter
    def connection(self, val):
        type(self)._connection = val


# Function to return a configured Snowpark session
def get_snowpark_session() -> Session:
    # if running in snowflake
    if SnowflakeConnection().connection:
        # Not sure what this does?
        session = SnowflakeConnection().connection

    # if running locally with a config file
    # TODO: Look for a creds.json style file. This should be the way all snowpark
    # related tools work IMO
    # if using snowsql config, like snowcli does
    elif os.path.exists(os.path.expanduser('~/.snowsql/config')):
        snowpark_config = get_snowsql_config()
        SnowflakeConnection().connection = Session.builder.configs(snowpark_config).create()

    # otherwise configure from environment variables
    elif "SNOWSQL_ACCOUNT" in os.environ:
        snowpark_config = {
            "account": os.environ["SNOWSQL_ACCOUNT"],
            "user": os.environ["SNOWSQL_USER"],
            "password": os.environ["SNOWSQL_PWD"],
            "role": os.environ["SNOWSQL_ROLE"],
            "warehouse": os.environ["SNOWSQL_WAREHOUSE"],
            "database": os.environ["SNOWSQL_DATABASE"],
            "schema": os.environ["SNOWSQL_SCHEMA"]
        }
        SnowflakeConnection().connection = Session.builder.configs(snowpark_config).create()

    if SnowflakeConnection().connection:
        return SnowflakeConnection().connection  # type: ignore
    else:
        raise Exception("Unable to create a Snowpark session")


# Mimic the snowcli logic for getting config details, but skip the app.toml processing
# since this will be called outside the snowcli app context.
# TODO: It would be nice to get rid of this entirely and always use creds.json but
# need to update snowcli to make that happen
def get_snowsql_config(
    connection_name: str = 'dev',
    config_file_path: str = os.path.expanduser('~/.snowsql/config'),
) -> dict:
    import configparser

    snowsql_to_snowpark_config_mapping = {
        'account': 'account',
        'accountname': 'account',
        'username': 'user',
        'password': 'password',
        'rolename': 'role',
        'warehousename': 'warehouse',
        'dbname': 'database',
        'schemaname': 'schema'
    }
    try:
        config = configparser.ConfigParser(inline_comment_prefixes="#")
        connection_path = 'connections.' + connection_name
        config.read(config_file_path)
        session_config = config[connection_path]

        # Convert snowsql connection variable names to snowcli ones
        session_config_dict = {
            snowsql_to_snowpark_config_mapping[k]: v.strip('"')
            for k, v in session_config.items()
        }
        return session_config_dict
    except Exception:
        raise Exception(
            "Error getting snowsql config details"
        )
35.917647
92
0.654128
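A minimal usage sketch for the connection helper above, assuming the snowsql config file or the SNOWSQL_* environment variables it reads are present; the module name is a placeholder:

# Assumes the helper above is importable as connection_helper (hypothetical name)
# and that ~/.snowsql/config or SNOWSQL_* environment variables are configured.
from connection_helper import get_snowpark_session

session = get_snowpark_session()
print(session.sql("SELECT CURRENT_WAREHOUSE(), CURRENT_DATABASE()").collect())
session.close()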
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import os import logging from airflow import DAG from airflow.utils.dates import days_ago from airflow.operators.bash import BashOperator from airflow.operators.python import PythonOperator from google.cloud import storage from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator import pyarrow.csv as pv import pyarrow.parquet as pq PROJECT_ID = os.environ.get("GCP_PROJECT_ID") BUCKET = os.environ.get("GCP_GCS_BUCKET") dataset_file = "yellow_tripdata_2021-01.csv" dataset_url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{dataset_file}" path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/") parquet_file = dataset_file.replace('.csv', '.parquet') BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all') def format_to_parquet(src_file): if not src_file.endswith('.csv'): logging.error("Can only accept source files in CSV format, for the moment") return table = pv.read_csv(src_file) pq.write_table(table, src_file.replace('.csv', '.parquet')) # NOTE: takes 20 mins, at an upload speed of 800kbps. Faster if your internet has a better upload speed def upload_to_gcs(bucket, object_name, local_file): """ Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python :param bucket: GCS bucket name :param object_name: target path & file-name :param local_file: source path & file-name :return: """ # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed. # (Ref: https://github.com/googleapis/python-storage/issues/74) storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB # End of Workaround client = storage.Client() bucket = client.bucket(bucket) blob = bucket.blob(object_name) blob.upload_from_filename(local_file) default_args = { "owner": "airflow", "start_date": days_ago(1), "depends_on_past": False, "retries": 1, } # NOTE: DAG declaration - using a Context Manager (an implicit way) with DAG( dag_id="data_ingestion_gcs_dag", schedule_interval="@daily", default_args=default_args, catchup=False, max_active_runs=1, tags=['dtc-de'], ) as dag: download_dataset_task = BashOperator( task_id="download_dataset_task", bash_command=f"curl -sSL {dataset_url} > {path_to_local_home}/{dataset_file}" ) format_to_parquet_task = PythonOperator( task_id="format_to_parquet_task", python_callable=format_to_parquet, op_kwargs={ "src_file": f"{path_to_local_home}/{dataset_file}", }, ) # TODO: Homework - research and try XCOM to communicate output values between 2 tasks/operators local_to_gcs_task = PythonOperator( task_id="local_to_gcs_task", python_callable=upload_to_gcs, op_kwargs={ "bucket": BUCKET, "object_name": f"raw/{parquet_file}", "local_file": f"{path_to_local_home}/{parquet_file}", }, ) bigquery_external_table_task = BigQueryCreateExternalTableOperator( task_id="bigquery_external_table_task", table_resource={ "tableReference": { "projectId": PROJECT_ID, "datasetId": BIGQUERY_DATASET, "tableId": "external_table", }, "externalDataConfiguration": { "sourceFormat": "PARQUET", "sourceUris": [f"gs://{BUCKET}/raw/{parquet_file}"], }, }, ) download_dataset_task >> format_to_parquet_task >> local_to_gcs_task >> bigquery_external_table_task
32.423423
104
0.65031
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import os
from datetime import datetime

from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator

from ingest_script import ingest_callable


AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")

PG_HOST = os.getenv('PG_HOST')
PG_USER = os.getenv('PG_USER')
PG_PASSWORD = os.getenv('PG_PASSWORD')
PG_PORT = os.getenv('PG_PORT')
PG_DATABASE = os.getenv('PG_DATABASE')


local_workflow = DAG(
    "LocalIngestionDag",
    schedule_interval="0 6 2 * *",
    start_date=datetime(2021, 1, 1)
)


URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
URL_TEMPLATE = URL_PREFIX + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
OUTPUT_FILE_TEMPLATE = AIRFLOW_HOME + '/output_{{ execution_date.strftime(\'%Y-%m\') }}.csv'
TABLE_NAME_TEMPLATE = 'yellow_taxi_{{ execution_date.strftime(\'%Y_%m\') }}'


with local_workflow:
    wget_task = BashOperator(
        task_id='wget',
        bash_command=f'curl -sSL {URL_TEMPLATE} > {OUTPUT_FILE_TEMPLATE}'
    )

    ingest_task = PythonOperator(
        task_id="ingest",
        python_callable=ingest_callable,
        op_kwargs=dict(
            user=PG_USER,
            password=PG_PASSWORD,
            host=PG_HOST,
            port=PG_PORT,
            db=PG_DATABASE,
            table_name=TABLE_NAME_TEMPLATE,
            csv_file=OUTPUT_FILE_TEMPLATE
        ),
    )

    wget_task >> ingest_task
25.327273
92
0.639945
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import os

from time import time

import pandas as pd
from sqlalchemy import create_engine


def ingest_callable(user, password, host, port, db, table_name, csv_file, execution_date):
    print(table_name, csv_file, execution_date)

    engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}')
    engine.connect()

    print('connection established successfully, inserting data...')

    t_start = time()
    df_iter = pd.read_csv(csv_file, iterator=True, chunksize=100000)

    df = next(df_iter)

    df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
    df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)

    df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace')
    df.to_sql(name=table_name, con=engine, if_exists='append')

    t_end = time()
    print('inserted the first chunk, took %.3f second' % (t_end - t_start))

    while True:
        t_start = time()

        try:
            df = next(df_iter)
        except StopIteration:
            print("completed")
            break

        df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)
        df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)

        df.to_sql(name=table_name, con=engine, if_exists='append')

        t_end = time()
        print('inserted another chunk, took %.3f second' % (t_end - t_start))
27.306122
90
0.6443
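The callable above can also be exercised outside Airflow. A small sketch with hypothetical local Postgres credentials and file paths, not values taken from the dataset:

# Hypothetical values for a local test run; adjust to your environment.
ingest_callable(
    user="root",
    password="root",
    host="localhost",
    port="5432",
    db="ny_taxi",
    table_name="yellow_taxi_2021_01",
    csv_file="/opt/airflow/output_2021-01.csv",
    execution_date="2021-01-01",
)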
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import os from datetime import datetime from airflow import DAG from airflow.utils.dates import days_ago from airflow.operators.bash import BashOperator from airflow.operators.python import PythonOperator from google.cloud import storage PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "pivotal-surfer-336713") BUCKET = os.environ.get("GCP_GCS_BUCKET", "dtc_data_lake_pivotal-surfer-336713") dataset_file = "yellow_tripdata_2021-01.csv" dataset_url = f"https://s3.amazonaws.com/nyc-tlc/trip+data/{dataset_file}" path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/") path_to_creds = f"{path_to_local_home}/google_credentials.json" default_args = { "owner": "airflow", "start_date": days_ago(1), "depends_on_past": False, "retries": 1, } # # Takes 15-20 mins to run. Good case for using Spark (distributed processing, in place of chunks) # def upload_to_gcs(bucket, object_name, local_file): # """ # Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python # :param bucket: GCS bucket name # :param object_name: target path & file-name # :param local_file: source path & file-name # :return: # """ # # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload link. # # (Ref: https://github.com/googleapis/python-storage/issues/74) # storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB # storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB # # client = storage.Client() # bucket = client.bucket(bucket) # # blob = bucket.blob(object_name) # # blob.chunk_size = 5 * 1024 * 1024 # blob.upload_from_filename(local_file) with DAG( dag_id="data_ingestion_gcs_dag", schedule_interval="@daily", default_args=default_args, catchup=True, max_active_runs=1, ) as dag: # Takes ~2 mins, depending upon your internet's download speed download_dataset_task = BashOperator( task_id="download_dataset_task", bash_command=f"curl -sS {dataset_url} > {path_to_local_home}/{dataset_file}" # "&& unzip {zip_file} && rm {zip_file}" ) # # APPROACH 1: (takes 20 mins, at an upload speed of 800Kbps. Faster if your internet has a better upload speed) # upload_to_gcs_task = PythonOperator( # task_id="upload_to_gcs_task", # python_callable=upload_to_gcs, # op_kwargs={ # "bucket": BUCKET, # "object_name": f"raw/{dataset_file}", # "local_file": f"{path_to_local_home}/{dataset_file}", # # }, # ) # OR APPROACH 2: (takes 20 mins, at an upload speed of 800Kbps. Faster if your internet has a better upload speed) # Ref: https://cloud.google.com/blog/products/gcp/optimizing-your-cloud-storage-performance-google-cloud-performance-atlas upload_to_gcs_task = BashOperator( task_id="upload_to_gcs_task", bash_command=f"gcloud auth activate-service-account --key-file={path_to_creds} && \ gsutil -m cp {path_to_local_home}/{dataset_file} gs://{BUCKET}", ) download_dataset_task >> upload_to_gcs_task
36.421687
128
0.66087
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import os import logging from datetime import datetime from airflow import DAG from airflow.utils.dates import days_ago from airflow.operators.bash import BashOperator from airflow.operators.python import PythonOperator from google.cloud import storage import pyarrow.csv as pv import pyarrow.parquet as pq PROJECT_ID = os.environ.get("GCP_PROJECT_ID") BUCKET = os.environ.get("GCP_GCS_BUCKET") AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/") def format_to_parquet(src_file, dest_file): if not src_file.endswith('.csv'): logging.error("Can only accept source files in CSV format, for the moment") return table = pv.read_csv(src_file) pq.write_table(table, dest_file) def upload_to_gcs(bucket, object_name, local_file): client = storage.Client() bucket = client.bucket(bucket) blob = bucket.blob(object_name) blob.upload_from_filename(local_file) default_args = { "owner": "airflow", #"start_date": days_ago(1), "depends_on_past": False, "retries": 1, } def donwload_parquetize_upload_dag( dag, url_template, local_csv_path_template, local_parquet_path_template, gcs_path_template ): with dag: download_dataset_task = BashOperator( task_id="download_dataset_task", bash_command=f"curl -sSLf {url_template} > {local_csv_path_template}" ) format_to_parquet_task = PythonOperator( task_id="format_to_parquet_task", python_callable=format_to_parquet, op_kwargs={ "src_file": local_csv_path_template, "dest_file": local_parquet_path_template }, ) local_to_gcs_task = PythonOperator( task_id="local_to_gcs_task", python_callable=upload_to_gcs, op_kwargs={ "bucket": BUCKET, "object_name": gcs_path_template, "local_file": local_parquet_path_template, }, ) rm_task = BashOperator( task_id="rm_task", bash_command=f"rm {local_csv_path_template} {local_parquet_path_template}" ) download_dataset_task >> format_to_parquet_task >> local_to_gcs_task >> rm_task URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data' YELLOW_TAXI_URL_TEMPLATE = URL_PREFIX + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv' YELLOW_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv' YELLOW_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet' YELLOW_TAXI_GCS_PATH_TEMPLATE = "raw/yellow_tripdata/{{ execution_date.strftime(\'%Y\') }}/yellow_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet" yellow_taxi_data_dag = DAG( dag_id="yellow_taxi_data_v2", schedule_interval="0 6 2 * *", start_date=datetime(2019, 1, 1), default_args=default_args, catchup=True, max_active_runs=3, tags=['dtc-de'], ) donwload_parquetize_upload_dag( dag=yellow_taxi_data_dag, url_template=YELLOW_TAXI_URL_TEMPLATE, local_csv_path_template=YELLOW_TAXI_CSV_FILE_TEMPLATE, local_parquet_path_template=YELLOW_TAXI_PARQUET_FILE_TEMPLATE, gcs_path_template=YELLOW_TAXI_GCS_PATH_TEMPLATE ) # https://s3.amazonaws.com/nyc-tlc/trip+data/green_tripdata_2021-01.csv GREEN_TAXI_URL_TEMPLATE = URL_PREFIX + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv' GREEN_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv' GREEN_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet' GREEN_TAXI_GCS_PATH_TEMPLATE = "raw/green_tripdata/{{ execution_date.strftime(\'%Y\') }}/green_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet" green_taxi_data_dag = DAG( dag_id="green_taxi_data_v1", schedule_interval="0 7 2 * *", start_date=datetime(2019, 1, 1), 
default_args=default_args, catchup=True, max_active_runs=3, tags=['dtc-de'], ) donwload_parquetize_upload_dag( dag=green_taxi_data_dag, url_template=GREEN_TAXI_URL_TEMPLATE, local_csv_path_template=GREEN_TAXI_CSV_FILE_TEMPLATE, local_parquet_path_template=GREEN_TAXI_PARQUET_FILE_TEMPLATE, gcs_path_template=GREEN_TAXI_GCS_PATH_TEMPLATE ) # https://nyc-tlc.s3.amazonaws.com/trip+data/fhv_tripdata_2021-01.csv FHV_TAXI_URL_TEMPLATE = URL_PREFIX + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv' FHV_TAXI_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.csv' FHV_TAXI_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet' FHV_TAXI_GCS_PATH_TEMPLATE = "raw/fhv_tripdata/{{ execution_date.strftime(\'%Y\') }}/fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}.parquet" fhv_taxi_data_dag = DAG( dag_id="hfv_taxi_data_v1", schedule_interval="0 8 2 * *", start_date=datetime(2019, 1, 1), end_date=datetime(2020, 1, 1), default_args=default_args, catchup=True, max_active_runs=3, tags=['dtc-de'], ) donwload_parquetize_upload_dag( dag=fhv_taxi_data_dag, url_template=FHV_TAXI_URL_TEMPLATE, local_csv_path_template=FHV_TAXI_CSV_FILE_TEMPLATE, local_parquet_path_template=FHV_TAXI_PARQUET_FILE_TEMPLATE, gcs_path_template=FHV_TAXI_GCS_PATH_TEMPLATE ) # https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv ZONES_URL_TEMPLATE = 'https://s3.amazonaws.com/nyc-tlc/misc/taxi+_zone_lookup.csv' ZONES_CSV_FILE_TEMPLATE = AIRFLOW_HOME + '/taxi_zone_lookup.csv' ZONES_PARQUET_FILE_TEMPLATE = AIRFLOW_HOME + '/taxi_zone_lookup.parquet' ZONES_GCS_PATH_TEMPLATE = "raw/taxi_zone/taxi_zone_lookup.parquet" zones_data_dag = DAG( dag_id="zones_data_v1", schedule_interval="@once", start_date=days_ago(1), default_args=default_args, catchup=True, max_active_runs=3, tags=['dtc-de'], ) donwload_parquetize_upload_dag( dag=zones_data_dag, url_template=ZONES_URL_TEMPLATE, local_csv_path_template=ZONES_CSV_FILE_TEMPLATE, local_parquet_path_template=ZONES_PARQUET_FILE_TEMPLATE, gcs_path_template=ZONES_GCS_PATH_TEMPLATE )
32.393617
156
0.665127
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import os import logging from airflow import DAG from airflow.utils.dates import days_ago from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator, BigQueryInsertJobOperator from airflow.providers.google.cloud.transfers.gcs_to_gcs import GCSToGCSOperator PROJECT_ID = os.environ.get("GCP_PROJECT_ID") BUCKET = os.environ.get("GCP_GCS_BUCKET") path_to_local_home = os.environ.get("AIRFLOW_HOME", "/opt/airflow/") BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all') DATASET = "tripdata" COLOUR_RANGE = {'yellow': 'tpep_pickup_datetime', 'green': 'lpep_pickup_datetime'} INPUT_PART = "raw" INPUT_FILETYPE = "parquet" default_args = { "owner": "airflow", "start_date": days_ago(1), "depends_on_past": False, "retries": 1, } # NOTE: DAG declaration - using a Context Manager (an implicit way) with DAG( dag_id="gcs_2_bq_dag", schedule_interval="@daily", default_args=default_args, catchup=False, max_active_runs=1, tags=['dtc-de'], ) as dag: for colour, ds_col in COLOUR_RANGE.items(): move_files_gcs_task = GCSToGCSOperator( task_id=f'move_{colour}_{DATASET}_files_task', source_bucket=BUCKET, source_object=f'{INPUT_PART}/{colour}_{DATASET}*.{INPUT_FILETYPE}', destination_bucket=BUCKET, destination_object=f'{colour}/{colour}_{DATASET}', move_object=True ) bigquery_external_table_task = BigQueryCreateExternalTableOperator( task_id=f"bq_{colour}_{DATASET}_external_table_task", table_resource={ "tableReference": { "projectId": PROJECT_ID, "datasetId": BIGQUERY_DATASET, "tableId": f"{colour}_{DATASET}_external_table", }, "externalDataConfiguration": { "autodetect": "True", "sourceFormat": f"{INPUT_FILETYPE.upper()}", "sourceUris": [f"gs://{BUCKET}/{colour}/*"], }, }, ) CREATE_BQ_TBL_QUERY = ( f"CREATE OR REPLACE TABLE {BIGQUERY_DATASET}.{colour}_{DATASET} \ PARTITION BY DATE({ds_col}) \ AS \ SELECT * FROM {BIGQUERY_DATASET}.{colour}_{DATASET}_external_table;" ) # Create a partitioned table from external table bq_create_partitioned_table_job = BigQueryInsertJobOperator( task_id=f"bq_create_{colour}_{DATASET}_partitioned_table_task", configuration={ "query": { "query": CREATE_BQ_TBL_QUERY, "useLegacySql": False, } } ) move_files_gcs_task >> bigquery_external_table_task >> bq_create_partitioned_table_job
33.890244
124
0.58951
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from confluent_kafka import Producer import argparse import csv from typing import Dict from time import sleep from settings import CONFLUENT_CLOUD_CONFIG, \ GREEN_TAXI_TOPIC, FHV_TAXI_TOPIC, \ GREEN_TRIP_DATA_PATH, FHV_TRIP_DATA_PATH class RideCSVProducer: def __init__(self, probs: Dict, ride_type: str): self.producer = Producer(**probs) self.ride_type = ride_type def parse_row(self, row): if self.ride_type == 'green': record = f'{row[5]}, {row[6]}' # PULocationID, DOLocationID key = str(row[0]) # vendor_id elif self.ride_type == 'fhv': record = f'{row[3]}, {row[4]}' # PULocationID, DOLocationID, key = str(row[0]) # dispatching_base_num return key, record def read_records(self, resource_path: str): records, ride_keys = [], [] with open(resource_path, 'r') as f: reader = csv.reader(f) header = next(reader) # skip the header for row in reader: key, record = self.parse_row(row) ride_keys.append(key) records.append(record) return zip(ride_keys, records) def publish(self, records: [str, str], topic: str): for key_value in records: key, value = key_value try: self.producer.poll(0) self.producer.produce(topic=topic, key=key, value=value) print(f"Producing record for <key: {key}, value:{value}>") except KeyboardInterrupt: break except BufferError as bfer: self.producer.poll(0.1) except Exception as e: print(f"Exception while producing record - {value}: {e}") self.producer.flush() sleep(10) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Kafka Consumer') parser.add_argument('--type', type=str, default='green') args = parser.parse_args() if args.type == 'green': kafka_topic = GREEN_TAXI_TOPIC data_path = GREEN_TRIP_DATA_PATH elif args.type == 'fhv': kafka_topic = FHV_TAXI_TOPIC data_path = FHV_TRIP_DATA_PATH producer = RideCSVProducer(ride_type=args.type, probs=CONFLUENT_CLOUD_CONFIG) ride_records = producer.read_records(resource_path=data_path) producer.publish(records=ride_records, topic=kafka_topic)
32.819444
81
0.587921
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import pyspark.sql.types as T

INPUT_DATA_PATH = '../../resources/rides.csv'
BOOTSTRAP_SERVERS = 'localhost:9092'

TOPIC_WINDOWED_VENDOR_ID_COUNT = 'vendor_counts_windowed'

PRODUCE_TOPIC_RIDES_CSV = CONSUME_TOPIC_RIDES_CSV = 'rides_csv'

RIDE_SCHEMA = T.StructType(
    [T.StructField("vendor_id", T.IntegerType()),
     T.StructField('tpep_pickup_datetime', T.TimestampType()),
     T.StructField('tpep_dropoff_datetime', T.TimestampType()),
     T.StructField("passenger_count", T.IntegerType()),
     T.StructField("trip_distance", T.FloatType()),
     T.StructField("payment_type", T.IntegerType()),
     T.StructField("total_amount", T.FloatType()),
     ])
34
63
0.691265
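A short sketch of how RIDE_SCHEMA can be inspected to see which column each position of the comma-separated Kafka message maps to, assuming the module above is importable as settings:

from settings import RIDE_SCHEMA

# Walk the StructType to list field positions, names, and Spark data types.
for idx, field in enumerate(RIDE_SCHEMA):
    print(idx, field.name, field.dataType)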
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from pyspark.sql import SparkSession import pyspark.sql.functions as F from settings import CONFLUENT_CLOUD_CONFIG, GREEN_TAXI_TOPIC, FHV_TAXI_TOPIC, RIDES_TOPIC, ALL_RIDE_SCHEMA def read_from_kafka(consume_topic: str): # Spark Streaming DataFrame, connect to Kafka topic served at host in bootrap.servers option df_stream = spark \ .readStream \ .format("kafka") \ .option("kafka.bootstrap.servers", CONFLUENT_CLOUD_CONFIG['bootstrap.servers']) \ .option("subscribe", consume_topic) \ .option("startingOffsets", "earliest") \ .option("checkpointLocation", "checkpoint") \ .option("kafka.security.protocol", "SASL_SSL") \ .option("kafka.sasl.mechanism", "PLAIN") \ .option("kafka.sasl.jaas.config", f"""org.apache.kafka.common.security.plain.PlainLoginModule required username="{CONFLUENT_CLOUD_CONFIG['sasl.username']}" password="{CONFLUENT_CLOUD_CONFIG['sasl.password']}";""") \ .option("failOnDataLoss", False) \ .load() return df_stream def parse_rides(df, schema): """ take a Spark Streaming df and parse value col based on <schema>, return streaming df cols in schema """ assert df.isStreaming is True, "DataFrame doesn't receive streaming data" df = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)") # split attributes to nested array in one Column col = F.split(df['value'], ', ') # expand col to multiple top-level columns for idx, field in enumerate(schema): df = df.withColumn(field.name, col.getItem(idx).cast(field.dataType)) df = df.na.drop() df.printSchema() return df.select([field.name for field in schema]) def sink_console(df, output_mode: str = 'complete', processing_time: str = '5 seconds'): query = df.writeStream \ .outputMode(output_mode) \ .trigger(processingTime=processing_time) \ .format("console") \ .option("truncate", False) \ .start() \ .awaitTermination() return query # pyspark.sql.streaming.StreamingQuery def sink_kafka(df, topic, output_mode: str = 'complete'): query = df.writeStream \ .format("kafka") \ .option("kafka.bootstrap.servers", "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092") \ .outputMode(output_mode) \ .option("topic", topic) \ .option("checkpointLocation", "checkpoint") \ .option("kafka.security.protocol", "SASL_SSL") \ .option("kafka.sasl.mechanism", "PLAIN") \ .option("kafka.sasl.jaas.config", f"""org.apache.kafka.common.security.plain.PlainLoginModule required username="{CONFLUENT_CLOUD_CONFIG['sasl.username']}" password="{CONFLUENT_CLOUD_CONFIG['sasl.password']}";""") \ .option("failOnDataLoss", False) \ .start() return query def op_groupby(df, column_names): df_aggregation = df.groupBy(column_names).count() return df_aggregation if __name__ == "__main__": spark = SparkSession.builder.appName('streaming-homework').getOrCreate() spark.sparkContext.setLogLevel('WARN') # Step 1: Consume GREEN_TAXI_TOPIC and FHV_TAXI_TOPIC df_green_rides = read_from_kafka(consume_topic=GREEN_TAXI_TOPIC) df_fhv_rides = read_from_kafka(consume_topic=FHV_TAXI_TOPIC) # Step 2: Publish green and fhv rides to RIDES_TOPIC kafka_sink_green_query = sink_kafka(df=df_green_rides, topic=RIDES_TOPIC, output_mode='append') kafka_sink_fhv_query = sink_kafka(df=df_fhv_rides, topic=RIDES_TOPIC, output_mode='append') # Step 3: Read RIDES_TOPIC and parse it in ALL_RIDE_SCHEMA df_all_rides = read_from_kafka(consume_topic=RIDES_TOPIC) df_all_rides = parse_rides(df_all_rides, ALL_RIDE_SCHEMA) # Step 4: Apply Aggregation on the all_rides df_pu_location_count = op_groupby(df_all_rides, ['PULocationID']) df_pu_location_count = df_pu_location_count.sort(F.col('count').desc()) # Step 5: Sink 
Aggregation Streams to Console console_sink_pu_location = sink_console(df_pu_location_count, output_mode='complete')
39.87
197
0.666911
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
#!/usr/bin/env python # coding: utf-8 import os import argparse from time import time import pandas as pd from sqlalchemy import create_engine def main(params): user = params.user password = params.password host = params.host port = params.port db = params.db table_name = params.table_name url = params.url # the backup files are gzipped, and it's important to keep the correct extension # for pandas to be able to open the file if url.endswith('.csv.gz'): csv_name = 'output.csv.gz' else: csv_name = 'output.csv' os.system(f"wget {url} -O {csv_name}") engine = create_engine(f'postgresql://{user}:{password}@{host}:{port}/{db}') df_iter = pd.read_csv(csv_name, iterator=True, chunksize=100000) df = next(df_iter) df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime) df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime) df.head(n=0).to_sql(name=table_name, con=engine, if_exists='replace') df.to_sql(name=table_name, con=engine, if_exists='append') while True: try: t_start = time() df = next(df_iter) df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime) df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime) df.to_sql(name=table_name, con=engine, if_exists='append') t_end = time() print('inserted another chunk, took %.3f second' % (t_end - t_start)) except StopIteration: print("Finished ingesting data into the postgres database") break if __name__ == '__main__': parser = argparse.ArgumentParser(description='Ingest CSV data to Postgres') parser.add_argument('--user', required=True, help='user name for postgres') parser.add_argument('--password', required=True, help='password for postgres') parser.add_argument('--host', required=True, help='host for postgres') parser.add_argument('--port', required=True, help='port for postgres') parser.add_argument('--db', required=True, help='database name for postgres') parser.add_argument('--table_name', required=True, help='name of the table where we will write the results to') parser.add_argument('--url', required=True, help='url of the csv file') args = parser.parse_args() main(args)
29.417722
115
0.642798
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import sys

import pandas as pd

print(sys.argv)

day = sys.argv[1]

# some fancy stuff with pandas

print(f'job finished successfully for day = {day}')
12.909091
51
0.723684
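This parameterized script is the kind of job the surrounding Airflow DAGs shell out to. A hedged sketch of wiring it into a BashOperator; the DAG and script path are assumptions for illustration, not part of the dataset:

from datetime import datetime

from airflow import DAG
from airflow.operators.bash import BashOperator

with DAG(
    dag_id="daily_pandas_job",  # hypothetical DAG
    schedule_interval="@daily",
    start_date=datetime(2021, 1, 1),
    catchup=False,
) as dag:
    run_daily_job = BashOperator(
        task_id="run_daily_job",
        # {{ ds }} renders to the logical date, which becomes sys.argv[1] in the script
        bash_command="python /opt/airflow/dags/daily_job.py {{ ds }}",  # hypothetical path
    )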
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import io import os import requests import pandas as pd from google.cloud import storage """ Pre-reqs: 1. `pip install pandas pyarrow google-cloud-storage` 2. Set GOOGLE_APPLICATION_CREDENTIALS to your project/service-account key 3. Set GCP_GCS_BUCKET as your bucket or change default value of BUCKET """ # services = ['fhv','green','yellow'] init_url = 'https://github.com/DataTalksClub/nyc-tlc-data/releases/download/' # switch out the bucketname BUCKET = os.environ.get("GCP_GCS_BUCKET", "dtc-data-lake-bucketname") def upload_to_gcs(bucket, object_name, local_file): """ Ref: https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python """ # # WORKAROUND to prevent timeout for files > 6 MB on 800 kbps upload speed. # # (Ref: https://github.com/googleapis/python-storage/issues/74) # storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024 # 5 MB # storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024 # 5 MB client = storage.Client() bucket = client.bucket(bucket) blob = bucket.blob(object_name) blob.upload_from_filename(local_file) def web_to_gcs(year, service): for i in range(12): # sets the month part of the file_name string month = '0'+str(i+1) month = month[-2:] # csv file_name file_name = f"{service}_tripdata_{year}-{month}.csv.gz" # download it using requests via a pandas df request_url = f"{init_url}{service}/{file_name}" r = requests.get(request_url) open(file_name, 'wb').write(r.content) print(f"Local: {file_name}") # read it back into a parquet file df = pd.read_csv(file_name, compression='gzip') file_name = file_name.replace('.csv.gz', '.parquet') df.to_parquet(file_name, engine='pyarrow') print(f"Parquet: {file_name}") # upload it to gcs upload_to_gcs(BUCKET, f"{service}/{file_name}", file_name) print(f"GCS: {service}/{file_name}") web_to_gcs('2019', 'green') web_to_gcs('2020', 'green') # web_to_gcs('2019', 'yellow') # web_to_gcs('2020', 'yellow')
30.671642
93
0.642621
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
#!/usr/bin/env python # coding: utf-8 import argparse import pyspark from pyspark.sql import SparkSession from pyspark.sql import functions as F parser = argparse.ArgumentParser() parser.add_argument('--input_green', required=True) parser.add_argument('--input_yellow', required=True) parser.add_argument('--output', required=True) args = parser.parse_args() input_green = args.input_green input_yellow = args.input_yellow output = args.output spark = SparkSession.builder \ .appName('test') \ .getOrCreate() df_green = spark.read.parquet(input_green) df_green = df_green \ .withColumnRenamed('lpep_pickup_datetime', 'pickup_datetime') \ .withColumnRenamed('lpep_dropoff_datetime', 'dropoff_datetime') df_yellow = spark.read.parquet(input_yellow) df_yellow = df_yellow \ .withColumnRenamed('tpep_pickup_datetime', 'pickup_datetime') \ .withColumnRenamed('tpep_dropoff_datetime', 'dropoff_datetime') common_colums = [ 'VendorID', 'pickup_datetime', 'dropoff_datetime', 'store_and_fwd_flag', 'RatecodeID', 'PULocationID', 'DOLocationID', 'passenger_count', 'trip_distance', 'fare_amount', 'extra', 'mta_tax', 'tip_amount', 'tolls_amount', 'improvement_surcharge', 'total_amount', 'payment_type', 'congestion_surcharge' ] df_green_sel = df_green \ .select(common_colums) \ .withColumn('service_type', F.lit('green')) df_yellow_sel = df_yellow \ .select(common_colums) \ .withColumn('service_type', F.lit('yellow')) df_trips_data = df_green_sel.unionAll(df_yellow_sel) df_trips_data.registerTempTable('trips_data') df_result = spark.sql(""" SELECT -- Reveneue grouping PULocationID AS revenue_zone, date_trunc('month', pickup_datetime) AS revenue_month, service_type, -- Revenue calculation SUM(fare_amount) AS revenue_monthly_fare, SUM(extra) AS revenue_monthly_extra, SUM(mta_tax) AS revenue_monthly_mta_tax, SUM(tip_amount) AS revenue_monthly_tip_amount, SUM(tolls_amount) AS revenue_monthly_tolls_amount, SUM(improvement_surcharge) AS revenue_monthly_improvement_surcharge, SUM(total_amount) AS revenue_monthly_total_amount, SUM(congestion_surcharge) AS revenue_monthly_congestion_surcharge, -- Additional calculations AVG(passenger_count) AS avg_montly_passenger_count, AVG(trip_distance) AS avg_montly_trip_distance FROM trips_data GROUP BY 1, 2, 3 """) df_result.coalesce(1) \ .write.parquet(output, mode='overwrite')
21.75
72
0.690224
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
#!/usr/bin/env python # coding: utf-8 import argparse import pyspark from pyspark.sql import SparkSession from pyspark.sql import functions as F parser = argparse.ArgumentParser() parser.add_argument('--input_green', required=True) parser.add_argument('--input_yellow', required=True) parser.add_argument('--output', required=True) args = parser.parse_args() input_green = args.input_green input_yellow = args.input_yellow output = args.output spark = SparkSession.builder \ .appName('test') \ .getOrCreate() spark.conf.set('temporaryGcsBucket', 'dataproc-temp-europe-west6-828225226997-fckhkym8') df_green = spark.read.parquet(input_green) df_green = df_green \ .withColumnRenamed('lpep_pickup_datetime', 'pickup_datetime') \ .withColumnRenamed('lpep_dropoff_datetime', 'dropoff_datetime') df_yellow = spark.read.parquet(input_yellow) df_yellow = df_yellow \ .withColumnRenamed('tpep_pickup_datetime', 'pickup_datetime') \ .withColumnRenamed('tpep_dropoff_datetime', 'dropoff_datetime') common_colums = [ 'VendorID', 'pickup_datetime', 'dropoff_datetime', 'store_and_fwd_flag', 'RatecodeID', 'PULocationID', 'DOLocationID', 'passenger_count', 'trip_distance', 'fare_amount', 'extra', 'mta_tax', 'tip_amount', 'tolls_amount', 'improvement_surcharge', 'total_amount', 'payment_type', 'congestion_surcharge' ] df_green_sel = df_green \ .select(common_colums) \ .withColumn('service_type', F.lit('green')) df_yellow_sel = df_yellow \ .select(common_colums) \ .withColumn('service_type', F.lit('yellow')) df_trips_data = df_green_sel.unionAll(df_yellow_sel) df_trips_data.registerTempTable('trips_data') df_result = spark.sql(""" SELECT -- Reveneue grouping PULocationID AS revenue_zone, date_trunc('month', pickup_datetime) AS revenue_month, service_type, -- Revenue calculation SUM(fare_amount) AS revenue_monthly_fare, SUM(extra) AS revenue_monthly_extra, SUM(mta_tax) AS revenue_monthly_mta_tax, SUM(tip_amount) AS revenue_monthly_tip_amount, SUM(tolls_amount) AS revenue_monthly_tolls_amount, SUM(improvement_surcharge) AS revenue_monthly_improvement_surcharge, SUM(total_amount) AS revenue_monthly_total_amount, SUM(congestion_surcharge) AS revenue_monthly_congestion_surcharge, -- Additional calculations AVG(passenger_count) AS avg_montly_passenger_count, AVG(trip_distance) AS avg_montly_trip_distance FROM trips_data GROUP BY 1, 2, 3 """) df_result.write.format('bigquery') \ .option('table', output) \ .save()
22.069565
88
0.690422
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import argparse from typing import Dict, List from kafka import KafkaConsumer from settings import BOOTSTRAP_SERVERS, CONSUME_TOPIC_RIDES_CSV class RideCSVConsumer: def __init__(self, props: Dict): self.consumer = KafkaConsumer(**props) def consume_from_kafka(self, topics: List[str]): self.consumer.subscribe(topics=topics) print('Consuming from Kafka started') print('Available topics to consume: ', self.consumer.subscription()) while True: try: # SIGINT can't be handled when polling, limit timeout to 1 second. msg = self.consumer.poll(1.0) if msg is None or msg == {}: continue for msg_key, msg_values in msg.items(): for msg_val in msg_values: print(f'Key:{msg_val.key}-type({type(msg_val.key)}), ' f'Value:{msg_val.value}-type({type(msg_val.value)})') except KeyboardInterrupt: break self.consumer.close() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Kafka Consumer') parser.add_argument('--topic', type=str, default=CONSUME_TOPIC_RIDES_CSV) args = parser.parse_args() topic = args.topic config = { 'bootstrap_servers': [BOOTSTRAP_SERVERS], 'auto_offset_reset': 'earliest', 'enable_auto_commit': True, 'key_deserializer': lambda key: int(key.decode('utf-8')), 'value_deserializer': lambda value: value.decode('utf-8'), 'group_id': 'consumer.group.id.csv-example.1', } csv_consumer = RideCSVConsumer(props=config) csv_consumer.consume_from_kafka(topics=[topic])
35.25
83
0.59632
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import csv from time import sleep from typing import Dict from kafka import KafkaProducer from settings import BOOTSTRAP_SERVERS, INPUT_DATA_PATH, PRODUCE_TOPIC_RIDES_CSV def delivery_report(err, msg): if err is not None: print("Delivery failed for record {}: {}".format(msg.key(), err)) return print('Record {} successfully produced to {} [{}] at offset {}'.format( msg.key(), msg.topic(), msg.partition(), msg.offset())) class RideCSVProducer: def __init__(self, props: Dict): self.producer = KafkaProducer(**props) # self.producer = Producer(producer_props) @staticmethod def read_records(resource_path: str): records, ride_keys = [], [] i = 0 with open(resource_path, 'r') as f: reader = csv.reader(f) header = next(reader) # skip the header for row in reader: # vendor_id, passenger_count, trip_distance, payment_type, total_amount records.append(f'{row[0]}, {row[1]}, {row[2]}, {row[3]}, {row[4]}, {row[9]}, {row[16]}') ride_keys.append(str(row[0])) i += 1 if i == 5: break return zip(ride_keys, records) def publish(self, topic: str, records: [str, str]): for key_value in records: key, value = key_value try: self.producer.send(topic=topic, key=key, value=value) print(f"Producing record for <key: {key}, value:{value}>") except KeyboardInterrupt: break except Exception as e: print(f"Exception while producing record - {value}: {e}") self.producer.flush() sleep(1) if __name__ == "__main__": config = { 'bootstrap_servers': [BOOTSTRAP_SERVERS], 'key_serializer': lambda x: x.encode('utf-8'), 'value_serializer': lambda x: x.encode('utf-8') } producer = RideCSVProducer(props=config) ride_records = producer.read_records(resource_path=INPUT_DATA_PATH) print(ride_records) producer.publish(topic=PRODUCE_TOPIC_RIDES_CSV, records=ride_records)
33.571429
104
0.574644
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from typing import List, Dict


class RideRecord:

    def __init__(self, arr: List[str]):
        self.vendor_id = int(arr[0])
        self.passenger_count = int(arr[1])
        self.trip_distance = float(arr[2])
        self.payment_type = int(arr[3])
        self.total_amount = float(arr[4])

    @classmethod
    def from_dict(cls, d: Dict):
        return cls(arr=[
            d['vendor_id'],
            d['passenger_count'],
            d['trip_distance'],
            d['payment_type'],
            d['total_amount']
        ])

    def __repr__(self):
        return f'{self.__class__.__name__}: {self.__dict__}'


def dict_to_ride_record(obj, ctx):
    if obj is None:
        return None

    return RideRecord.from_dict(obj)


def ride_record_to_dict(ride_record: RideRecord, ctx):
    return ride_record.__dict__
21.648649
60
0.540024
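A quick round-trip of the converters above; the ctx argument looks like a serializer-callback parameter, so passing None is fine for a local check. The module name in the import is a placeholder:

from ride_record import RideRecord, ride_record_to_dict, dict_to_ride_record  # hypothetical module name

record = RideRecord(arr=['1', '2', '3.5', '1', '21.3'])
as_dict = ride_record_to_dict(record, ctx=None)
assert dict_to_ride_record(as_dict, ctx=None).total_amount == 21.3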
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from typing import Dict


class RideRecordKey:
    def __init__(self, vendor_id):
        self.vendor_id = vendor_id

    @classmethod
    def from_dict(cls, d: Dict):
        return cls(vendor_id=d['vendor_id'])

    def __repr__(self):
        return f'{self.__class__.__name__}: {self.__dict__}'


def dict_to_ride_record_key(obj, ctx):
    if obj is None:
        return None

    return RideRecordKey.from_dict(obj)


def ride_record_key_to_dict(ride_record_key: RideRecordKey, ctx):
    return ride_record_key.__dict__
20.04
65
0.619048
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from typing import List, Dict
from decimal import Decimal
from datetime import datetime


class Ride:
    def __init__(self, arr: List[str]):
        self.vendor_id = arr[0]
        self.tpep_pickup_datetime = datetime.strptime(arr[1], "%Y-%m-%d %H:%M:%S"),
        self.tpep_dropoff_datetime = datetime.strptime(arr[2], "%Y-%m-%d %H:%M:%S"),
        self.passenger_count = int(arr[3])
        self.trip_distance = Decimal(arr[4])
        self.rate_code_id = int(arr[5])
        self.store_and_fwd_flag = arr[6]
        self.pu_location_id = int(arr[7])
        self.do_location_id = int(arr[8])
        self.payment_type = arr[9]
        self.fare_amount = Decimal(arr[10])
        self.extra = Decimal(arr[11])
        self.mta_tax = Decimal(arr[12])
        self.tip_amount = Decimal(arr[13])
        self.tolls_amount = Decimal(arr[14])
        self.improvement_surcharge = Decimal(arr[15])
        self.total_amount = Decimal(arr[16])
        self.congestion_surcharge = Decimal(arr[17])

    @classmethod
    def from_dict(cls, d: Dict):
        return cls(arr=[
            d['vendor_id'],
            d['tpep_pickup_datetime'][0],
            d['tpep_dropoff_datetime'][0],
            d['passenger_count'],
            d['trip_distance'],
            d['rate_code_id'],
            d['store_and_fwd_flag'],
            d['pu_location_id'],
            d['do_location_id'],
            d['payment_type'],
            d['fare_amount'],
            d['extra'],
            d['mta_tax'],
            d['tip_amount'],
            d['tolls_amount'],
            d['improvement_surcharge'],
            d['total_amount'],
            d['congestion_surcharge'],
        ])

    def __repr__(self):
        return f'{self.__class__.__name__}: {self.__dict__}'
32.396226
84
0.520068
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import faust

from taxi_rides import TaxiRide
from faust import current_event


app = faust.App('datatalksclub.stream.v3', broker='kafka://localhost:9092', consumer_auto_offset_reset="earliest")
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)

high_amount_rides = app.topic('datatalks.yellow_taxi_rides.high_amount')
low_amount_rides = app.topic('datatalks.yellow_taxi_rides.low_amount')


@app.agent(topic)
async def process(stream):
    async for event in stream:
        if event.total_amount >= 40.0:
            await current_event().forward(high_amount_rides)
        else:
            await current_event().forward(low_amount_rides)


if __name__ == '__main__':
    app.main()
31.318182
114
0.701408
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import csv
from json import dumps
from kafka import KafkaProducer
from time import sleep


producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                         key_serializer=lambda x: dumps(x).encode('utf-8'),
                         value_serializer=lambda x: dumps(x).encode('utf-8'))

file = open('../../resources/rides.csv')

csvreader = csv.reader(file)
header = next(csvreader)
for row in csvreader:
    key = {"vendorId": int(row[0])}
    value = {"vendorId": int(row[0]),
             "passenger_count": int(row[3]),
             "trip_distance": float(row[4]),
             "payment_type": int(row[9]),
             "total_amount": float(row[16])}
    producer.send('datatalkclub.yellow_taxi_ride.json', value=value, key=key)
    print("producing")
    sleep(1)
36
162
0.648173
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import faust

from taxi_rides import TaxiRide


app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)


@app.agent(topic)
async def start_reading(records):
    async for record in records:
        print(record)


if __name__ == '__main__':
    app.main()
19.823529
76
0.694051
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import faust

from taxi_rides import TaxiRide


app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)

vendor_rides = app.Table('vendor_rides', default=int)


@app.agent(topic)
async def process(stream):
    async for event in stream.group_by(TaxiRide.vendorId):
        vendor_rides[event.vendorId] += 1


if __name__ == '__main__':
    app.main()
23.833333
76
0.704036
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
import faust


class TaxiRide(faust.Record, validation=True):
    vendorId: str
    passenger_count: int
    trip_distance: float
    payment_type: int
    total_amount: float
16.7
46
0.704545
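Since the model above is declared with validation=True, faust can check field types when a record is constructed. A small hedged example of building one record directly:

from taxi_rides import TaxiRide

ride = TaxiRide(
    vendorId="2",
    passenger_count=1,
    trip_distance=2.5,
    payment_type=1,
    total_amount=12.3,
)
print(ride.asdict())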
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from datetime import timedelta

import faust

from taxi_rides import TaxiRide


app = faust.App('datatalksclub.stream.v2', broker='kafka://localhost:9092')
topic = app.topic('datatalkclub.yellow_taxi_ride.json', value_type=TaxiRide)

vendor_rides = app.Table('vendor_rides_windowed', default=int).tumbling(
    timedelta(minutes=1),
    expires=timedelta(hours=1),
)


@app.agent(topic)
async def process(stream):
    async for event in stream.group_by(TaxiRide.vendorId):
        vendor_rides[event.vendorId] += 1


if __name__ == '__main__':
    app.main()
23.26087
76
0.710952
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Python
from pyspark.sql import SparkSession import pyspark.sql.functions as F from settings import RIDE_SCHEMA, CONSUME_TOPIC_RIDES_CSV, TOPIC_WINDOWED_VENDOR_ID_COUNT def read_from_kafka(consume_topic: str): # Spark Streaming DataFrame, connect to Kafka topic served at host in bootrap.servers option df_stream = spark \ .readStream \ .format("kafka") \ .option("kafka.bootstrap.servers", "localhost:9092,broker:29092") \ .option("subscribe", consume_topic) \ .option("startingOffsets", "earliest") \ .option("checkpointLocation", "checkpoint") \ .load() return df_stream def parse_ride_from_kafka_message(df, schema): """ take a Spark Streaming df and parse value col based on <schema>, return streaming df cols in schema """ assert df.isStreaming is True, "DataFrame doesn't receive streaming data" df = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)") # split attributes to nested array in one Column col = F.split(df['value'], ', ') # expand col to multiple top-level columns for idx, field in enumerate(schema): df = df.withColumn(field.name, col.getItem(idx).cast(field.dataType)) return df.select([field.name for field in schema]) def sink_console(df, output_mode: str = 'complete', processing_time: str = '5 seconds'): write_query = df.writeStream \ .outputMode(output_mode) \ .trigger(processingTime=processing_time) \ .format("console") \ .option("truncate", False) \ .start() return write_query # pyspark.sql.streaming.StreamingQuery def sink_memory(df, query_name, query_template): query_df = df \ .writeStream \ .queryName(query_name) \ .format("memory") \ .start() query_str = query_template.format(table_name=query_name) query_results = spark.sql(query_str) return query_results, query_df def sink_kafka(df, topic): write_query = df.writeStream \ .format("kafka") \ .option("kafka.bootstrap.servers", "localhost:9092,broker:29092") \ .outputMode('complete') \ .option("topic", topic) \ .option("checkpointLocation", "checkpoint") \ .start() return write_query def prepare_df_to_kafka_sink(df, value_columns, key_column=None): columns = df.columns df = df.withColumn("value", F.concat_ws(', ', *value_columns)) if key_column: df = df.withColumnRenamed(key_column, "key") df = df.withColumn("key", df.key.cast('string')) return df.select(['key', 'value']) def op_groupby(df, column_names): df_aggregation = df.groupBy(column_names).count() return df_aggregation def op_windowed_groupby(df, window_duration, slide_duration): df_windowed_aggregation = df.groupBy( F.window(timeColumn=df.tpep_pickup_datetime, windowDuration=window_duration, slideDuration=slide_duration), df.vendor_id ).count() return df_windowed_aggregation if __name__ == "__main__": spark = SparkSession.builder.appName('streaming-examples').getOrCreate() spark.sparkContext.setLogLevel('WARN') # read_streaming data df_consume_stream = read_from_kafka(consume_topic=CONSUME_TOPIC_RIDES_CSV) print(df_consume_stream.printSchema()) # parse streaming data df_rides = parse_ride_from_kafka_message(df_consume_stream, RIDE_SCHEMA) print(df_rides.printSchema()) sink_console(df_rides, output_mode='append') df_trip_count_by_vendor_id = op_groupby(df_rides, ['vendor_id']) df_trip_count_by_pickup_date_vendor_id = op_windowed_groupby(df_rides, window_duration="10 minutes", slide_duration='5 minutes') # write the output out to the console for debugging / testing sink_console(df_trip_count_by_vendor_id) # write the output to the kafka topic df_trip_count_messages = prepare_df_to_kafka_sink(df=df_trip_count_by_pickup_date_vendor_id, value_columns=['count'], 
key_column='vendor_id') kafka_sink_query = sink_kafka(df=df_trip_count_messages, topic=TOPIC_WINDOWED_VENDOR_ID_COUNT) spark.streams.awaitAnyTermination()
35.586207
115
0.65449
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
/** * Autogenerated by Avro * * DO NOT EDIT DIRECTLY */ package schemaregistry; import org.apache.avro.generic.GenericArray; import org.apache.avro.specific.SpecificData; import org.apache.avro.util.Utf8; import org.apache.avro.message.BinaryMessageEncoder; import org.apache.avro.message.BinaryMessageDecoder; import org.apache.avro.message.SchemaStore; @org.apache.avro.specific.AvroGenerated public class RideRecord extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { private static final long serialVersionUID = 6805437803204402942L; public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecord\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendor_id\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"}]}"); public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } private static final SpecificData MODEL$ = new SpecificData(); private static final BinaryMessageEncoder<RideRecord> ENCODER = new BinaryMessageEncoder<>(MODEL$, SCHEMA$); private static final BinaryMessageDecoder<RideRecord> DECODER = new BinaryMessageDecoder<>(MODEL$, SCHEMA$); /** * Return the BinaryMessageEncoder instance used by this class. * @return the message encoder used by this class */ public static BinaryMessageEncoder<RideRecord> getEncoder() { return ENCODER; } /** * Return the BinaryMessageDecoder instance used by this class. * @return the message decoder used by this class */ public static BinaryMessageDecoder<RideRecord> getDecoder() { return DECODER; } /** * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. * @param resolver a {@link SchemaStore} used to find schemas by fingerprint * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore */ public static BinaryMessageDecoder<RideRecord> createDecoder(SchemaStore resolver) { return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver); } /** * Serializes this RideRecord to a ByteBuffer. * @return a buffer holding the serialized data for this instance * @throws java.io.IOException if this instance could not be serialized */ public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { return ENCODER.encode(this); } /** * Deserializes a RideRecord from a ByteBuffer. * @param b a byte buffer holding serialized data for an instance of this class * @return a RideRecord instance decoded from the given buffer * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class */ public static RideRecord fromByteBuffer( java.nio.ByteBuffer b) throws java.io.IOException { return DECODER.decode(b); } private java.lang.String vendor_id; private int passenger_count; private double trip_distance; /** * Default constructor. Note that this does not initialize fields * to their default values from the schema. If that is desired then * one should use <code>newBuilder()</code>. */ public RideRecord() {} /** * All-args constructor. 
* @param vendor_id The new value for vendor_id * @param passenger_count The new value for passenger_count * @param trip_distance The new value for trip_distance */ public RideRecord(java.lang.String vendor_id, java.lang.Integer passenger_count, java.lang.Double trip_distance) { this.vendor_id = vendor_id; this.passenger_count = passenger_count; this.trip_distance = trip_distance; } @Override public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } @Override public org.apache.avro.Schema getSchema() { return SCHEMA$; } // Used by DatumWriter. Applications should not call. @Override public java.lang.Object get(int field$) { switch (field$) { case 0: return vendor_id; case 1: return passenger_count; case 2: return trip_distance; default: throw new IndexOutOfBoundsException("Invalid index: " + field$); } } // Used by DatumReader. Applications should not call. @Override @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: vendor_id = value$ != null ? value$.toString() : null; break; case 1: passenger_count = (java.lang.Integer)value$; break; case 2: trip_distance = (java.lang.Double)value$; break; default: throw new IndexOutOfBoundsException("Invalid index: " + field$); } } /** * Gets the value of the 'vendor_id' field. * @return The value of the 'vendor_id' field. */ public java.lang.String getVendorId() { return vendor_id; } /** * Sets the value of the 'vendor_id' field. * @param value the value to set. */ public void setVendorId(java.lang.String value) { this.vendor_id = value; } /** * Gets the value of the 'passenger_count' field. * @return The value of the 'passenger_count' field. */ public int getPassengerCount() { return passenger_count; } /** * Sets the value of the 'passenger_count' field. * @param value the value to set. */ public void setPassengerCount(int value) { this.passenger_count = value; } /** * Gets the value of the 'trip_distance' field. * @return The value of the 'trip_distance' field. */ public double getTripDistance() { return trip_distance; } /** * Sets the value of the 'trip_distance' field. * @param value the value to set. */ public void setTripDistance(double value) { this.trip_distance = value; } /** * Creates a new RideRecord RecordBuilder. * @return A new RideRecord RecordBuilder */ public static schemaregistry.RideRecord.Builder newBuilder() { return new schemaregistry.RideRecord.Builder(); } /** * Creates a new RideRecord RecordBuilder by copying an existing Builder. * @param other The existing builder to copy. * @return A new RideRecord RecordBuilder */ public static schemaregistry.RideRecord.Builder newBuilder(schemaregistry.RideRecord.Builder other) { if (other == null) { return new schemaregistry.RideRecord.Builder(); } else { return new schemaregistry.RideRecord.Builder(other); } } /** * Creates a new RideRecord RecordBuilder by copying an existing RideRecord instance. * @param other The existing instance to copy. * @return A new RideRecord RecordBuilder */ public static schemaregistry.RideRecord.Builder newBuilder(schemaregistry.RideRecord other) { if (other == null) { return new schemaregistry.RideRecord.Builder(); } else { return new schemaregistry.RideRecord.Builder(other); } } /** * RecordBuilder for RideRecord instances. 
*/ @org.apache.avro.specific.AvroGenerated public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecord> implements org.apache.avro.data.RecordBuilder<RideRecord> { private java.lang.String vendor_id; private int passenger_count; private double trip_distance; /** Creates a new Builder */ private Builder() { super(SCHEMA$, MODEL$); } /** * Creates a Builder by copying an existing Builder. * @param other The existing Builder to copy. */ private Builder(schemaregistry.RideRecord.Builder other) { super(other); if (isValidValue(fields()[0], other.vendor_id)) { this.vendor_id = data().deepCopy(fields()[0].schema(), other.vendor_id); fieldSetFlags()[0] = other.fieldSetFlags()[0]; } if (isValidValue(fields()[1], other.passenger_count)) { this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count); fieldSetFlags()[1] = other.fieldSetFlags()[1]; } if (isValidValue(fields()[2], other.trip_distance)) { this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance); fieldSetFlags()[2] = other.fieldSetFlags()[2]; } } /** * Creates a Builder by copying an existing RideRecord instance * @param other The existing instance to copy. */ private Builder(schemaregistry.RideRecord other) { super(SCHEMA$, MODEL$); if (isValidValue(fields()[0], other.vendor_id)) { this.vendor_id = data().deepCopy(fields()[0].schema(), other.vendor_id); fieldSetFlags()[0] = true; } if (isValidValue(fields()[1], other.passenger_count)) { this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count); fieldSetFlags()[1] = true; } if (isValidValue(fields()[2], other.trip_distance)) { this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance); fieldSetFlags()[2] = true; } } /** * Gets the value of the 'vendor_id' field. * @return The value. */ public java.lang.String getVendorId() { return vendor_id; } /** * Sets the value of the 'vendor_id' field. * @param value The value of 'vendor_id'. * @return This builder. */ public schemaregistry.RideRecord.Builder setVendorId(java.lang.String value) { validate(fields()[0], value); this.vendor_id = value; fieldSetFlags()[0] = true; return this; } /** * Checks whether the 'vendor_id' field has been set. * @return True if the 'vendor_id' field has been set, false otherwise. */ public boolean hasVendorId() { return fieldSetFlags()[0]; } /** * Clears the value of the 'vendor_id' field. * @return This builder. */ public schemaregistry.RideRecord.Builder clearVendorId() { vendor_id = null; fieldSetFlags()[0] = false; return this; } /** * Gets the value of the 'passenger_count' field. * @return The value. */ public int getPassengerCount() { return passenger_count; } /** * Sets the value of the 'passenger_count' field. * @param value The value of 'passenger_count'. * @return This builder. */ public schemaregistry.RideRecord.Builder setPassengerCount(int value) { validate(fields()[1], value); this.passenger_count = value; fieldSetFlags()[1] = true; return this; } /** * Checks whether the 'passenger_count' field has been set. * @return True if the 'passenger_count' field has been set, false otherwise. */ public boolean hasPassengerCount() { return fieldSetFlags()[1]; } /** * Clears the value of the 'passenger_count' field. * @return This builder. */ public schemaregistry.RideRecord.Builder clearPassengerCount() { fieldSetFlags()[1] = false; return this; } /** * Gets the value of the 'trip_distance' field. * @return The value. 
*/ public double getTripDistance() { return trip_distance; } /** * Sets the value of the 'trip_distance' field. * @param value The value of 'trip_distance'. * @return This builder. */ public schemaregistry.RideRecord.Builder setTripDistance(double value) { validate(fields()[2], value); this.trip_distance = value; fieldSetFlags()[2] = true; return this; } /** * Checks whether the 'trip_distance' field has been set. * @return True if the 'trip_distance' field has been set, false otherwise. */ public boolean hasTripDistance() { return fieldSetFlags()[2]; } /** * Clears the value of the 'trip_distance' field. * @return This builder. */ public schemaregistry.RideRecord.Builder clearTripDistance() { fieldSetFlags()[2] = false; return this; } @Override @SuppressWarnings("unchecked") public RideRecord build() { try { RideRecord record = new RideRecord(); record.vendor_id = fieldSetFlags()[0] ? this.vendor_id : (java.lang.String) defaultValue(fields()[0]); record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]); record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]); return record; } catch (org.apache.avro.AvroMissingFieldException e) { throw e; } catch (java.lang.Exception e) { throw new org.apache.avro.AvroRuntimeException(e); } } } @SuppressWarnings("unchecked") private static final org.apache.avro.io.DatumWriter<RideRecord> WRITER$ = (org.apache.avro.io.DatumWriter<RideRecord>)MODEL$.createDatumWriter(SCHEMA$); @Override public void writeExternal(java.io.ObjectOutput out) throws java.io.IOException { WRITER$.write(this, SpecificData.getEncoder(out)); } @SuppressWarnings("unchecked") private static final org.apache.avro.io.DatumReader<RideRecord> READER$ = (org.apache.avro.io.DatumReader<RideRecord>)MODEL$.createDatumReader(SCHEMA$); @Override public void readExternal(java.io.ObjectInput in) throws java.io.IOException { READER$.read(this, SpecificData.getDecoder(in)); } @Override protected boolean hasCustomCoders() { return true; } @Override public void customEncode(org.apache.avro.io.Encoder out) throws java.io.IOException { out.writeString(this.vendor_id); out.writeInt(this.passenger_count); out.writeDouble(this.trip_distance); } @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in) throws java.io.IOException { org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff(); if (fieldOrder == null) { this.vendor_id = in.readString(); this.passenger_count = in.readInt(); this.trip_distance = in.readDouble(); } else { for (int i = 0; i < 3; i++) { switch (fieldOrder[i].pos()) { case 0: this.vendor_id = in.readString(); break; case 1: this.passenger_count = in.readInt(); break; case 2: this.trip_distance = in.readDouble(); break; default: throw new java.io.IOException("Corrupt ResolvingDecoder."); } } } } }
29.533473
377
0.659381
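For orientation, here is a minimal sketch (not part of the course repo; class name and sample values are hypothetical) of how the generated RideRecord class above is typically exercised: build an instance with its Builder and round-trip it through the single-object binary encoder exposed by toByteBuffer()/fromByteBuffer().

package schemaregistry;

import java.io.IOException;
import java.nio.ByteBuffer;

// Hypothetical sketch: build a RideRecord and round-trip it through the
// generated single-object binary encoder/decoder.
public class RideRecordRoundTrip {
    public static void main(String[] args) throws IOException {
        RideRecord ride = RideRecord.newBuilder()
                .setVendorId("1")          // sample values, invented for illustration
                .setPassengerCount(2)
                .setTripDistance(3.5)
                .build();

        // toByteBuffer()/fromByteBuffer() use Avro's single-object encoding,
        // which prefixes the payload with the schema fingerprint.
        ByteBuffer buffer = ride.toByteBuffer();
        RideRecord decoded = RideRecord.fromByteBuffer(buffer);

        System.out.println(decoded.getVendorId() + " " + decoded.getTripDistance());
    }
}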
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
/** * Autogenerated by Avro * * DO NOT EDIT DIRECTLY */ package schemaregistry; import org.apache.avro.generic.GenericArray; import org.apache.avro.specific.SpecificData; import org.apache.avro.util.Utf8; import org.apache.avro.message.BinaryMessageEncoder; import org.apache.avro.message.BinaryMessageDecoder; import org.apache.avro.message.SchemaStore; @org.apache.avro.specific.AvroGenerated public class RideRecordCompatible extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { private static final long serialVersionUID = 7163300507090021229L; public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecordCompatible\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendorId\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"},{\"name\":\"pu_location_id\",\"type\":[\"null\",\"long\"],\"default\":null}]}"); public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } private static final SpecificData MODEL$ = new SpecificData(); private static final BinaryMessageEncoder<RideRecordCompatible> ENCODER = new BinaryMessageEncoder<>(MODEL$, SCHEMA$); private static final BinaryMessageDecoder<RideRecordCompatible> DECODER = new BinaryMessageDecoder<>(MODEL$, SCHEMA$); /** * Return the BinaryMessageEncoder instance used by this class. * @return the message encoder used by this class */ public static BinaryMessageEncoder<RideRecordCompatible> getEncoder() { return ENCODER; } /** * Return the BinaryMessageDecoder instance used by this class. * @return the message decoder used by this class */ public static BinaryMessageDecoder<RideRecordCompatible> getDecoder() { return DECODER; } /** * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. * @param resolver a {@link SchemaStore} used to find schemas by fingerprint * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore */ public static BinaryMessageDecoder<RideRecordCompatible> createDecoder(SchemaStore resolver) { return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver); } /** * Serializes this RideRecordCompatible to a ByteBuffer. * @return a buffer holding the serialized data for this instance * @throws java.io.IOException if this instance could not be serialized */ public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { return ENCODER.encode(this); } /** * Deserializes a RideRecordCompatible from a ByteBuffer. * @param b a byte buffer holding serialized data for an instance of this class * @return a RideRecordCompatible instance decoded from the given buffer * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class */ public static RideRecordCompatible fromByteBuffer( java.nio.ByteBuffer b) throws java.io.IOException { return DECODER.decode(b); } private java.lang.String vendorId; private int passenger_count; private double trip_distance; private java.lang.Long pu_location_id; /** * Default constructor. Note that this does not initialize fields * to their default values from the schema. If that is desired then * one should use <code>newBuilder()</code>. */ public RideRecordCompatible() {} /** * All-args constructor. 
* @param vendorId The new value for vendorId * @param passenger_count The new value for passenger_count * @param trip_distance The new value for trip_distance * @param pu_location_id The new value for pu_location_id */ public RideRecordCompatible(java.lang.String vendorId, java.lang.Integer passenger_count, java.lang.Double trip_distance, java.lang.Long pu_location_id) { this.vendorId = vendorId; this.passenger_count = passenger_count; this.trip_distance = trip_distance; this.pu_location_id = pu_location_id; } @Override public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } @Override public org.apache.avro.Schema getSchema() { return SCHEMA$; } // Used by DatumWriter. Applications should not call. @Override public java.lang.Object get(int field$) { switch (field$) { case 0: return vendorId; case 1: return passenger_count; case 2: return trip_distance; case 3: return pu_location_id; default: throw new IndexOutOfBoundsException("Invalid index: " + field$); } } // Used by DatumReader. Applications should not call. @Override @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: vendorId = value$ != null ? value$.toString() : null; break; case 1: passenger_count = (java.lang.Integer)value$; break; case 2: trip_distance = (java.lang.Double)value$; break; case 3: pu_location_id = (java.lang.Long)value$; break; default: throw new IndexOutOfBoundsException("Invalid index: " + field$); } } /** * Gets the value of the 'vendorId' field. * @return The value of the 'vendorId' field. */ public java.lang.String getVendorId() { return vendorId; } /** * Sets the value of the 'vendorId' field. * @param value the value to set. */ public void setVendorId(java.lang.String value) { this.vendorId = value; } /** * Gets the value of the 'passenger_count' field. * @return The value of the 'passenger_count' field. */ public int getPassengerCount() { return passenger_count; } /** * Sets the value of the 'passenger_count' field. * @param value the value to set. */ public void setPassengerCount(int value) { this.passenger_count = value; } /** * Gets the value of the 'trip_distance' field. * @return The value of the 'trip_distance' field. */ public double getTripDistance() { return trip_distance; } /** * Sets the value of the 'trip_distance' field. * @param value the value to set. */ public void setTripDistance(double value) { this.trip_distance = value; } /** * Gets the value of the 'pu_location_id' field. * @return The value of the 'pu_location_id' field. */ public java.lang.Long getPuLocationId() { return pu_location_id; } /** * Sets the value of the 'pu_location_id' field. * @param value the value to set. */ public void setPuLocationId(java.lang.Long value) { this.pu_location_id = value; } /** * Creates a new RideRecordCompatible RecordBuilder. * @return A new RideRecordCompatible RecordBuilder */ public static schemaregistry.RideRecordCompatible.Builder newBuilder() { return new schemaregistry.RideRecordCompatible.Builder(); } /** * Creates a new RideRecordCompatible RecordBuilder by copying an existing Builder. * @param other The existing builder to copy. 
* @return A new RideRecordCompatible RecordBuilder */ public static schemaregistry.RideRecordCompatible.Builder newBuilder(schemaregistry.RideRecordCompatible.Builder other) { if (other == null) { return new schemaregistry.RideRecordCompatible.Builder(); } else { return new schemaregistry.RideRecordCompatible.Builder(other); } } /** * Creates a new RideRecordCompatible RecordBuilder by copying an existing RideRecordCompatible instance. * @param other The existing instance to copy. * @return A new RideRecordCompatible RecordBuilder */ public static schemaregistry.RideRecordCompatible.Builder newBuilder(schemaregistry.RideRecordCompatible other) { if (other == null) { return new schemaregistry.RideRecordCompatible.Builder(); } else { return new schemaregistry.RideRecordCompatible.Builder(other); } } /** * RecordBuilder for RideRecordCompatible instances. */ @org.apache.avro.specific.AvroGenerated public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecordCompatible> implements org.apache.avro.data.RecordBuilder<RideRecordCompatible> { private java.lang.String vendorId; private int passenger_count; private double trip_distance; private java.lang.Long pu_location_id; /** Creates a new Builder */ private Builder() { super(SCHEMA$, MODEL$); } /** * Creates a Builder by copying an existing Builder. * @param other The existing Builder to copy. */ private Builder(schemaregistry.RideRecordCompatible.Builder other) { super(other); if (isValidValue(fields()[0], other.vendorId)) { this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId); fieldSetFlags()[0] = other.fieldSetFlags()[0]; } if (isValidValue(fields()[1], other.passenger_count)) { this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count); fieldSetFlags()[1] = other.fieldSetFlags()[1]; } if (isValidValue(fields()[2], other.trip_distance)) { this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance); fieldSetFlags()[2] = other.fieldSetFlags()[2]; } if (isValidValue(fields()[3], other.pu_location_id)) { this.pu_location_id = data().deepCopy(fields()[3].schema(), other.pu_location_id); fieldSetFlags()[3] = other.fieldSetFlags()[3]; } } /** * Creates a Builder by copying an existing RideRecordCompatible instance * @param other The existing instance to copy. */ private Builder(schemaregistry.RideRecordCompatible other) { super(SCHEMA$, MODEL$); if (isValidValue(fields()[0], other.vendorId)) { this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId); fieldSetFlags()[0] = true; } if (isValidValue(fields()[1], other.passenger_count)) { this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count); fieldSetFlags()[1] = true; } if (isValidValue(fields()[2], other.trip_distance)) { this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance); fieldSetFlags()[2] = true; } if (isValidValue(fields()[3], other.pu_location_id)) { this.pu_location_id = data().deepCopy(fields()[3].schema(), other.pu_location_id); fieldSetFlags()[3] = true; } } /** * Gets the value of the 'vendorId' field. * @return The value. */ public java.lang.String getVendorId() { return vendorId; } /** * Sets the value of the 'vendorId' field. * @param value The value of 'vendorId'. * @return This builder. 
*/ public schemaregistry.RideRecordCompatible.Builder setVendorId(java.lang.String value) { validate(fields()[0], value); this.vendorId = value; fieldSetFlags()[0] = true; return this; } /** * Checks whether the 'vendorId' field has been set. * @return True if the 'vendorId' field has been set, false otherwise. */ public boolean hasVendorId() { return fieldSetFlags()[0]; } /** * Clears the value of the 'vendorId' field. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder clearVendorId() { vendorId = null; fieldSetFlags()[0] = false; return this; } /** * Gets the value of the 'passenger_count' field. * @return The value. */ public int getPassengerCount() { return passenger_count; } /** * Sets the value of the 'passenger_count' field. * @param value The value of 'passenger_count'. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder setPassengerCount(int value) { validate(fields()[1], value); this.passenger_count = value; fieldSetFlags()[1] = true; return this; } /** * Checks whether the 'passenger_count' field has been set. * @return True if the 'passenger_count' field has been set, false otherwise. */ public boolean hasPassengerCount() { return fieldSetFlags()[1]; } /** * Clears the value of the 'passenger_count' field. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder clearPassengerCount() { fieldSetFlags()[1] = false; return this; } /** * Gets the value of the 'trip_distance' field. * @return The value. */ public double getTripDistance() { return trip_distance; } /** * Sets the value of the 'trip_distance' field. * @param value The value of 'trip_distance'. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder setTripDistance(double value) { validate(fields()[2], value); this.trip_distance = value; fieldSetFlags()[2] = true; return this; } /** * Checks whether the 'trip_distance' field has been set. * @return True if the 'trip_distance' field has been set, false otherwise. */ public boolean hasTripDistance() { return fieldSetFlags()[2]; } /** * Clears the value of the 'trip_distance' field. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder clearTripDistance() { fieldSetFlags()[2] = false; return this; } /** * Gets the value of the 'pu_location_id' field. * @return The value. */ public java.lang.Long getPuLocationId() { return pu_location_id; } /** * Sets the value of the 'pu_location_id' field. * @param value The value of 'pu_location_id'. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder setPuLocationId(java.lang.Long value) { validate(fields()[3], value); this.pu_location_id = value; fieldSetFlags()[3] = true; return this; } /** * Checks whether the 'pu_location_id' field has been set. * @return True if the 'pu_location_id' field has been set, false otherwise. */ public boolean hasPuLocationId() { return fieldSetFlags()[3]; } /** * Clears the value of the 'pu_location_id' field. * @return This builder. */ public schemaregistry.RideRecordCompatible.Builder clearPuLocationId() { pu_location_id = null; fieldSetFlags()[3] = false; return this; } @Override @SuppressWarnings("unchecked") public RideRecordCompatible build() { try { RideRecordCompatible record = new RideRecordCompatible(); record.vendorId = fieldSetFlags()[0] ? this.vendorId : (java.lang.String) defaultValue(fields()[0]); record.passenger_count = fieldSetFlags()[1] ? 
this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]); record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]); record.pu_location_id = fieldSetFlags()[3] ? this.pu_location_id : (java.lang.Long) defaultValue(fields()[3]); return record; } catch (org.apache.avro.AvroMissingFieldException e) { throw e; } catch (java.lang.Exception e) { throw new org.apache.avro.AvroRuntimeException(e); } } } @SuppressWarnings("unchecked") private static final org.apache.avro.io.DatumWriter<RideRecordCompatible> WRITER$ = (org.apache.avro.io.DatumWriter<RideRecordCompatible>)MODEL$.createDatumWriter(SCHEMA$); @Override public void writeExternal(java.io.ObjectOutput out) throws java.io.IOException { WRITER$.write(this, SpecificData.getEncoder(out)); } @SuppressWarnings("unchecked") private static final org.apache.avro.io.DatumReader<RideRecordCompatible> READER$ = (org.apache.avro.io.DatumReader<RideRecordCompatible>)MODEL$.createDatumReader(SCHEMA$); @Override public void readExternal(java.io.ObjectInput in) throws java.io.IOException { READER$.read(this, SpecificData.getDecoder(in)); } @Override protected boolean hasCustomCoders() { return true; } @Override public void customEncode(org.apache.avro.io.Encoder out) throws java.io.IOException { out.writeString(this.vendorId); out.writeInt(this.passenger_count); out.writeDouble(this.trip_distance); if (this.pu_location_id == null) { out.writeIndex(0); out.writeNull(); } else { out.writeIndex(1); out.writeLong(this.pu_location_id); } } @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in) throws java.io.IOException { org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff(); if (fieldOrder == null) { this.vendorId = in.readString(); this.passenger_count = in.readInt(); this.trip_distance = in.readDouble(); if (in.readIndex() != 1) { in.readNull(); this.pu_location_id = null; } else { this.pu_location_id = in.readLong(); } } else { for (int i = 0; i < 4; i++) { switch (fieldOrder[i].pos()) { case 0: this.vendorId = in.readString(); break; case 1: this.passenger_count = in.readInt(); break; case 2: this.trip_distance = in.readDouble(); break; case 3: if (in.readIndex() != 1) { in.readNull(); this.pu_location_id = null; } else { this.pu_location_id = in.readLong(); } break; default: throw new java.io.IOException("Corrupt ResolvingDecoder."); } } } } }
30.317073
462
0.657858
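A short hedged sketch (hypothetical, not in the repo) of what makes RideRecordCompatible above "compatible": the added pu_location_id field is a ["null","long"] union with default null, so a producer that never sets it still builds a valid record, and readers on either the old or the new schema can resolve each other's data.

package schemaregistry;

// Hypothetical sketch: the optional pu_location_id field defaults to null,
// which is what keeps this schema evolution compatible in both directions.
public class RideRecordCompatibleExample {
    public static void main(String[] args) {
        RideRecordCompatible withoutLocation = RideRecordCompatible.newBuilder()
                .setVendorId("1")
                .setPassengerCount(1)
                .setTripDistance(2.0)
                .build();                      // pu_location_id falls back to its null default

        RideRecordCompatible withLocation = RideRecordCompatible.newBuilder()
                .setVendorId("1")
                .setPassengerCount(1)
                .setTripDistance(2.0)
                .setPuLocationId(186L)         // invented sample location id
                .build();

        System.out.println(withoutLocation.getPuLocationId()); // null
        System.out.println(withLocation.getPuLocationId());    // 186
    }
}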
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
/** * Autogenerated by Avro * * DO NOT EDIT DIRECTLY */ package schemaregistry; import org.apache.avro.generic.GenericArray; import org.apache.avro.specific.SpecificData; import org.apache.avro.util.Utf8; import org.apache.avro.message.BinaryMessageEncoder; import org.apache.avro.message.BinaryMessageDecoder; import org.apache.avro.message.SchemaStore; @org.apache.avro.specific.AvroGenerated public class RideRecordNoneCompatible extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { private static final long serialVersionUID = -4618980179396772493L; public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"RideRecordNoneCompatible\",\"namespace\":\"schemaregistry\",\"fields\":[{\"name\":\"vendorId\",\"type\":\"int\"},{\"name\":\"passenger_count\",\"type\":\"int\"},{\"name\":\"trip_distance\",\"type\":\"double\"}]}"); public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } private static final SpecificData MODEL$ = new SpecificData(); private static final BinaryMessageEncoder<RideRecordNoneCompatible> ENCODER = new BinaryMessageEncoder<>(MODEL$, SCHEMA$); private static final BinaryMessageDecoder<RideRecordNoneCompatible> DECODER = new BinaryMessageDecoder<>(MODEL$, SCHEMA$); /** * Return the BinaryMessageEncoder instance used by this class. * @return the message encoder used by this class */ public static BinaryMessageEncoder<RideRecordNoneCompatible> getEncoder() { return ENCODER; } /** * Return the BinaryMessageDecoder instance used by this class. * @return the message decoder used by this class */ public static BinaryMessageDecoder<RideRecordNoneCompatible> getDecoder() { return DECODER; } /** * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. * @param resolver a {@link SchemaStore} used to find schemas by fingerprint * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore */ public static BinaryMessageDecoder<RideRecordNoneCompatible> createDecoder(SchemaStore resolver) { return new BinaryMessageDecoder<>(MODEL$, SCHEMA$, resolver); } /** * Serializes this RideRecordNoneCompatible to a ByteBuffer. * @return a buffer holding the serialized data for this instance * @throws java.io.IOException if this instance could not be serialized */ public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { return ENCODER.encode(this); } /** * Deserializes a RideRecordNoneCompatible from a ByteBuffer. * @param b a byte buffer holding serialized data for an instance of this class * @return a RideRecordNoneCompatible instance decoded from the given buffer * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class */ public static RideRecordNoneCompatible fromByteBuffer( java.nio.ByteBuffer b) throws java.io.IOException { return DECODER.decode(b); } private int vendorId; private int passenger_count; private double trip_distance; /** * Default constructor. Note that this does not initialize fields * to their default values from the schema. If that is desired then * one should use <code>newBuilder()</code>. */ public RideRecordNoneCompatible() {} /** * All-args constructor. 
* @param vendorId The new value for vendorId * @param passenger_count The new value for passenger_count * @param trip_distance The new value for trip_distance */ public RideRecordNoneCompatible(java.lang.Integer vendorId, java.lang.Integer passenger_count, java.lang.Double trip_distance) { this.vendorId = vendorId; this.passenger_count = passenger_count; this.trip_distance = trip_distance; } @Override public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } @Override public org.apache.avro.Schema getSchema() { return SCHEMA$; } // Used by DatumWriter. Applications should not call. @Override public java.lang.Object get(int field$) { switch (field$) { case 0: return vendorId; case 1: return passenger_count; case 2: return trip_distance; default: throw new IndexOutOfBoundsException("Invalid index: " + field$); } } // Used by DatumReader. Applications should not call. @Override @SuppressWarnings(value="unchecked") public void put(int field$, java.lang.Object value$) { switch (field$) { case 0: vendorId = (java.lang.Integer)value$; break; case 1: passenger_count = (java.lang.Integer)value$; break; case 2: trip_distance = (java.lang.Double)value$; break; default: throw new IndexOutOfBoundsException("Invalid index: " + field$); } } /** * Gets the value of the 'vendorId' field. * @return The value of the 'vendorId' field. */ public int getVendorId() { return vendorId; } /** * Sets the value of the 'vendorId' field. * @param value the value to set. */ public void setVendorId(int value) { this.vendorId = value; } /** * Gets the value of the 'passenger_count' field. * @return The value of the 'passenger_count' field. */ public int getPassengerCount() { return passenger_count; } /** * Sets the value of the 'passenger_count' field. * @param value the value to set. */ public void setPassengerCount(int value) { this.passenger_count = value; } /** * Gets the value of the 'trip_distance' field. * @return The value of the 'trip_distance' field. */ public double getTripDistance() { return trip_distance; } /** * Sets the value of the 'trip_distance' field. * @param value the value to set. */ public void setTripDistance(double value) { this.trip_distance = value; } /** * Creates a new RideRecordNoneCompatible RecordBuilder. * @return A new RideRecordNoneCompatible RecordBuilder */ public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder() { return new schemaregistry.RideRecordNoneCompatible.Builder(); } /** * Creates a new RideRecordNoneCompatible RecordBuilder by copying an existing Builder. * @param other The existing builder to copy. * @return A new RideRecordNoneCompatible RecordBuilder */ public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder(schemaregistry.RideRecordNoneCompatible.Builder other) { if (other == null) { return new schemaregistry.RideRecordNoneCompatible.Builder(); } else { return new schemaregistry.RideRecordNoneCompatible.Builder(other); } } /** * Creates a new RideRecordNoneCompatible RecordBuilder by copying an existing RideRecordNoneCompatible instance. * @param other The existing instance to copy. * @return A new RideRecordNoneCompatible RecordBuilder */ public static schemaregistry.RideRecordNoneCompatible.Builder newBuilder(schemaregistry.RideRecordNoneCompatible other) { if (other == null) { return new schemaregistry.RideRecordNoneCompatible.Builder(); } else { return new schemaregistry.RideRecordNoneCompatible.Builder(other); } } /** * RecordBuilder for RideRecordNoneCompatible instances. 
*/ @org.apache.avro.specific.AvroGenerated public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<RideRecordNoneCompatible> implements org.apache.avro.data.RecordBuilder<RideRecordNoneCompatible> { private int vendorId; private int passenger_count; private double trip_distance; /** Creates a new Builder */ private Builder() { super(SCHEMA$, MODEL$); } /** * Creates a Builder by copying an existing Builder. * @param other The existing Builder to copy. */ private Builder(schemaregistry.RideRecordNoneCompatible.Builder other) { super(other); if (isValidValue(fields()[0], other.vendorId)) { this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId); fieldSetFlags()[0] = other.fieldSetFlags()[0]; } if (isValidValue(fields()[1], other.passenger_count)) { this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count); fieldSetFlags()[1] = other.fieldSetFlags()[1]; } if (isValidValue(fields()[2], other.trip_distance)) { this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance); fieldSetFlags()[2] = other.fieldSetFlags()[2]; } } /** * Creates a Builder by copying an existing RideRecordNoneCompatible instance * @param other The existing instance to copy. */ private Builder(schemaregistry.RideRecordNoneCompatible other) { super(SCHEMA$, MODEL$); if (isValidValue(fields()[0], other.vendorId)) { this.vendorId = data().deepCopy(fields()[0].schema(), other.vendorId); fieldSetFlags()[0] = true; } if (isValidValue(fields()[1], other.passenger_count)) { this.passenger_count = data().deepCopy(fields()[1].schema(), other.passenger_count); fieldSetFlags()[1] = true; } if (isValidValue(fields()[2], other.trip_distance)) { this.trip_distance = data().deepCopy(fields()[2].schema(), other.trip_distance); fieldSetFlags()[2] = true; } } /** * Gets the value of the 'vendorId' field. * @return The value. */ public int getVendorId() { return vendorId; } /** * Sets the value of the 'vendorId' field. * @param value The value of 'vendorId'. * @return This builder. */ public schemaregistry.RideRecordNoneCompatible.Builder setVendorId(int value) { validate(fields()[0], value); this.vendorId = value; fieldSetFlags()[0] = true; return this; } /** * Checks whether the 'vendorId' field has been set. * @return True if the 'vendorId' field has been set, false otherwise. */ public boolean hasVendorId() { return fieldSetFlags()[0]; } /** * Clears the value of the 'vendorId' field. * @return This builder. */ public schemaregistry.RideRecordNoneCompatible.Builder clearVendorId() { fieldSetFlags()[0] = false; return this; } /** * Gets the value of the 'passenger_count' field. * @return The value. */ public int getPassengerCount() { return passenger_count; } /** * Sets the value of the 'passenger_count' field. * @param value The value of 'passenger_count'. * @return This builder. */ public schemaregistry.RideRecordNoneCompatible.Builder setPassengerCount(int value) { validate(fields()[1], value); this.passenger_count = value; fieldSetFlags()[1] = true; return this; } /** * Checks whether the 'passenger_count' field has been set. * @return True if the 'passenger_count' field has been set, false otherwise. */ public boolean hasPassengerCount() { return fieldSetFlags()[1]; } /** * Clears the value of the 'passenger_count' field. * @return This builder. */ public schemaregistry.RideRecordNoneCompatible.Builder clearPassengerCount() { fieldSetFlags()[1] = false; return this; } /** * Gets the value of the 'trip_distance' field. * @return The value. 
*/ public double getTripDistance() { return trip_distance; } /** * Sets the value of the 'trip_distance' field. * @param value The value of 'trip_distance'. * @return This builder. */ public schemaregistry.RideRecordNoneCompatible.Builder setTripDistance(double value) { validate(fields()[2], value); this.trip_distance = value; fieldSetFlags()[2] = true; return this; } /** * Checks whether the 'trip_distance' field has been set. * @return True if the 'trip_distance' field has been set, false otherwise. */ public boolean hasTripDistance() { return fieldSetFlags()[2]; } /** * Clears the value of the 'trip_distance' field. * @return This builder. */ public schemaregistry.RideRecordNoneCompatible.Builder clearTripDistance() { fieldSetFlags()[2] = false; return this; } @Override @SuppressWarnings("unchecked") public RideRecordNoneCompatible build() { try { RideRecordNoneCompatible record = new RideRecordNoneCompatible(); record.vendorId = fieldSetFlags()[0] ? this.vendorId : (java.lang.Integer) defaultValue(fields()[0]); record.passenger_count = fieldSetFlags()[1] ? this.passenger_count : (java.lang.Integer) defaultValue(fields()[1]); record.trip_distance = fieldSetFlags()[2] ? this.trip_distance : (java.lang.Double) defaultValue(fields()[2]); return record; } catch (org.apache.avro.AvroMissingFieldException e) { throw e; } catch (java.lang.Exception e) { throw new org.apache.avro.AvroRuntimeException(e); } } } @SuppressWarnings("unchecked") private static final org.apache.avro.io.DatumWriter<RideRecordNoneCompatible> WRITER$ = (org.apache.avro.io.DatumWriter<RideRecordNoneCompatible>)MODEL$.createDatumWriter(SCHEMA$); @Override public void writeExternal(java.io.ObjectOutput out) throws java.io.IOException { WRITER$.write(this, SpecificData.getEncoder(out)); } @SuppressWarnings("unchecked") private static final org.apache.avro.io.DatumReader<RideRecordNoneCompatible> READER$ = (org.apache.avro.io.DatumReader<RideRecordNoneCompatible>)MODEL$.createDatumReader(SCHEMA$); @Override public void readExternal(java.io.ObjectInput in) throws java.io.IOException { READER$.read(this, SpecificData.getDecoder(in)); } @Override protected boolean hasCustomCoders() { return true; } @Override public void customEncode(org.apache.avro.io.Encoder out) throws java.io.IOException { out.writeInt(this.vendorId); out.writeInt(this.passenger_count); out.writeDouble(this.trip_distance); } @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in) throws java.io.IOException { org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff(); if (fieldOrder == null) { this.vendorId = in.readInt(); this.passenger_count = in.readInt(); this.trip_distance = in.readDouble(); } else { for (int i = 0; i < 3; i++) { switch (fieldOrder[i].pos()) { case 0: this.vendorId = in.readInt(); break; case 1: this.passenger_count = in.readInt(); break; case 2: this.trip_distance = in.readDouble(); break; default: throw new java.io.IOException("Corrupt ResolvingDecoder."); } } } } }
30.607966
344
0.675975
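To see why the class above is the "none compatible" variant, one can run Avro's own compatibility checker against the original RideRecord schema; the sketch below is hypothetical (not in the repo) and assumes both generated classes are on the classpath. The reader field vendorId is an int with no default and no longer matches the original vendor_id string field, so resolution is expected to fail.

package schemaregistry;

import org.apache.avro.SchemaCompatibility;
import org.apache.avro.SchemaCompatibility.SchemaPairCompatibility;

// Hypothetical sketch: check the new schema (reader) against the original
// RideRecord schema (writer) with Avro's built-in compatibility checker.
public class CompatibilityCheck {
    public static void main(String[] args) {
        SchemaPairCompatibility result = SchemaCompatibility.checkReaderWriterCompatibility(
                RideRecordNoneCompatible.SCHEMA$,   // reader (new schema)
                RideRecord.SCHEMA$);                // writer (old schema)

        // Expected to report INCOMPATIBLE: vendorId (int, no default)
        // cannot be resolved from the old vendor_id string field.
        System.out.println(result.getType());
    }
}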
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;
import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.StreamsConfig;
import schemaregistry.RideRecord;

import java.io.FileReader;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

public class AvroProducer {

    private Properties props = new Properties();

    public AvroProducer() {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
        props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "https://psrc-kk5gg.europe-west3.gcp.confluent.cloud");
        props.put("basic.auth.credentials.source", "USER_INFO");
        props.put("basic.auth.user.info", Secrets.SCHEMA_REGISTRY_KEY+":"+Secrets.SCHEMA_REGISTRY_SECRET);
    }

    public List<RideRecord> getRides() throws IOException, CsvException {
        var ridesStream = this.getClass().getResource("/rides.csv");
        var reader = new CSVReader(new FileReader(ridesStream.getFile()));
        reader.skip(1);

        return reader.readAll().stream().map(row ->
            RideRecord.newBuilder()
                    .setVendorId(row[0])
                    .setTripDistance(Double.parseDouble(row[4]))
                    .setPassengerCount(Integer.parseInt(row[3]))
                    .build()
        ).collect(Collectors.toList());
    }

    public void publishRides(List<RideRecord> rides) throws ExecutionException, InterruptedException {
        KafkaProducer<String, RideRecord> kafkaProducer = new KafkaProducer<>(props);
        for (RideRecord ride : rides) {
            var record = kafkaProducer.send(new ProducerRecord<>("rides_avro", String.valueOf(ride.getVendorId()), ride), (metadata, exception) -> {
                if (exception != null) {
                    System.out.println(exception.getMessage());
                }
            });
            System.out.println(record.get().offset());
            Thread.sleep(500);
        }
    }

    public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
        var producer = new AvroProducer();
        var rideRecords = producer.getRides();
        producer.publishRides(rideRecords);
    }
}
44.767123
192
0.688323
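A possible counterpart to the AvroProducer above, sketched here as an assumption rather than repo code: a consumer that reads rides_avro back as SpecificRecord instances. It reuses the same cluster, Schema Registry URL and Secrets placeholders as the producer; the class name and group id are invented.

package org.example;

import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import io.confluent.kafka.serializers.KafkaAvroDeserializerConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import schemaregistry.RideRecord;

import java.time.Duration;
import java.util.List;
import java.util.Properties;

// Hypothetical sketch: consume the rides_avro topic as RideRecord instances.
public class AvroConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='" + Secrets.KAFKA_CLUSTER_KEY + "' password='" + Secrets.KAFKA_CLUSTER_SECRET + "';");
        props.put("sasl.mechanism", "PLAIN");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka_tutorial_example.avroconsumer.v1");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
        props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "https://psrc-kk5gg.europe-west3.gcp.confluent.cloud");
        props.put("basic.auth.credentials.source", "USER_INFO");
        props.put("basic.auth.user.info", Secrets.SCHEMA_REGISTRY_KEY + ":" + Secrets.SCHEMA_REGISTRY_SECRET);
        // Without this flag the deserializer returns GenericRecord instead of RideRecord.
        props.put(KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG, true);

        try (KafkaConsumer<String, RideRecord> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("rides_avro"));
            for (ConsumerRecord<String, RideRecord> record : consumer.poll(Duration.ofSeconds(5))) {
                System.out.println(record.value().getVendorId() + " " + record.value().getTripDistance());
            }
        }
    }
}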
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.example.data.Ride;

import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Properties;

import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig;

public class JsonConsumer {

    private Properties props = new Properties();
    private KafkaConsumer<String, Ride> consumer;

    public JsonConsumer() {
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonDeserializer");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "kafka_tutorial_example.jsonconsumer.v2");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(KafkaJsonDeserializerConfig.JSON_VALUE_TYPE, Ride.class);
        consumer = new KafkaConsumer<String, Ride>(props);
        consumer.subscribe(List.of("rides"));
    }

    public void consumeFromKafka() {
        System.out.println("Consuming from kafka started");
        var results = consumer.poll(Duration.of(1, ChronoUnit.SECONDS));
        var i = 0;
        do {
            for (ConsumerRecord<String, Ride> result : results) {
                System.out.println(result.value().DOLocationID);
            }
            results = consumer.poll(Duration.of(1, ChronoUnit.SECONDS));
            System.out.println("RESULTS:::" + results.count());
            i++;
        } while (!results.isEmpty() || i < 10);
    }

    public static void main(String[] args) {
        JsonConsumer jsonConsumer = new JsonConsumer();
        jsonConsumer.consumeFromKafka();
    }
}
42.631579
192
0.697104
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;

import java.util.Properties;

public class JsonKStream {

    private Properties props = new Properties();

    public JsonKStream() {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.count.plocation.v1");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    }

    public Topology createTopology() {
        StreamsBuilder streamsBuilder = new StreamsBuilder();
        var ridesStream = streamsBuilder.stream("rides", Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
        var puLocationCount = ridesStream.groupByKey().count().toStream();
        puLocationCount.to("rides-pulocation-count", Produced.with(Serdes.String(), Serdes.Long()));
        return streamsBuilder.build();
    }

    public void countPLocation() throws InterruptedException {
        var topology = createTopology();
        var kStreams = new KafkaStreams(topology, props);
        kStreams.start();
        while (kStreams.state() != KafkaStreams.State.RUNNING) {
            System.out.println(kStreams.state());
            Thread.sleep(1000);
        }
        System.out.println(kStreams.state());
        Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
    }

    public static void main(String[] args) throws InterruptedException {
        var object = new JsonKStream();
        object.countPLocation();
    }
}
42.175439
192
0.707724
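The count topology above can be exercised without a broker using TopologyTestDriver, in the same style as the JsonKStreamJoinsTest further down. The sketch below is hypothetical (class name and inputs invented); counts are keyed by whatever key the producer used, which is DOLocationID in JsonProducer.

package org.example;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;

import java.util.Properties;

// Hypothetical smoke test for JsonKStream.createTopology().
public class JsonKStreamCountSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application");
        props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");

        Topology topology = new JsonKStream().createTopology();
        try (TopologyTestDriver testDriver = new TopologyTestDriver(topology, props)) {
            TestInputTopic<String, Ride> input = testDriver.createInputTopic(
                    "rides", Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer());
            TestOutputTopic<String, Long> output = testDriver.createOutputTopic(
                    "rides-pulocation-count", Serdes.String().deserializer(), Serdes.Long().deserializer());

            Ride ride = new Ride();   // empty Ride; only the record key matters for counting
            input.pipeInput("100", ride);
            input.pipeInput("100", ride);

            // Expect incremental counts for key "100" (exact emission depends on caching settings).
            System.out.println(output.readKeyValuesToList());
        }
    }
}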
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler;
import org.apache.kafka.streams.kstream.*;
import org.example.customserdes.CustomSerdes;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;

import java.time.Duration;
import java.util.Optional;
import java.util.Properties;

public class JsonKStreamJoins {

    private Properties props = new Properties();

    public JsonKStreamJoins() {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.joined.rides.pickuplocation.v1");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    }

    public Topology createTopology() {
        StreamsBuilder streamsBuilder = new StreamsBuilder();
        KStream<String, Ride> rides = streamsBuilder.stream(Topics.INPUT_RIDE_TOPIC,
                Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
        KStream<String, PickupLocation> pickupLocations = streamsBuilder.stream(Topics.INPUT_RIDE_LOCATION_TOPIC,
                Consumed.with(Serdes.String(), CustomSerdes.getSerde(PickupLocation.class)));

        var pickupLocationsKeyedOnPUId = pickupLocations.selectKey((key, value) -> String.valueOf(value.PULocationID));

        var joined = rides.join(pickupLocationsKeyedOnPUId, (ValueJoiner<Ride, PickupLocation, Optional<VendorInfo>>) (ride, pickupLocation) -> {
                    var period = Duration.between(ride.tpep_dropoff_datetime, pickupLocation.tpep_pickup_datetime);
                    if (period.abs().toMinutes() > 10) return Optional.empty();
                    else return Optional.of(new VendorInfo(ride.VendorID, pickupLocation.PULocationID, pickupLocation.tpep_pickup_datetime, ride.tpep_dropoff_datetime));
                }, JoinWindows.ofTimeDifferenceAndGrace(Duration.ofMinutes(20), Duration.ofMinutes(5)),
                StreamJoined.with(Serdes.String(), CustomSerdes.getSerde(Ride.class), CustomSerdes.getSerde(PickupLocation.class)));

        joined.filter(((key, value) -> value.isPresent())).mapValues(Optional::get)
                .to(Topics.OUTPUT_TOPIC, Produced.with(Serdes.String(), CustomSerdes.getSerde(VendorInfo.class)));

        return streamsBuilder.build();
    }

    public void joinRidesPickupLocation() throws InterruptedException {
        var topology = createTopology();
        var kStreams = new KafkaStreams(topology, props);
        kStreams.setUncaughtExceptionHandler(exception -> {
            System.out.println(exception.getMessage());
            return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_APPLICATION;
        });
        kStreams.start();
        while (kStreams.state() != KafkaStreams.State.RUNNING) {
            System.out.println(kStreams.state());
            Thread.sleep(1000);
        }
        System.out.println(kStreams.state());
        Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
    }

    public static void main(String[] args) throws InterruptedException {
        var object = new JsonKStreamJoins();
        object.joinRidesPickupLocation();
    }
}
50.922078
192
0.718039
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.WindowedSerdes;
import org.example.customserdes.CustomSerdes;
import org.example.data.Ride;

import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Properties;

public class JsonKStreamWindow {

    private Properties props = new Properties();

    public JsonKStreamWindow() {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka_tutorial.kstream.count.plocation.v1");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    }

    public Topology createTopology() {
        StreamsBuilder streamsBuilder = new StreamsBuilder();
        var ridesStream = streamsBuilder.stream("rides", Consumed.with(Serdes.String(), CustomSerdes.getSerde(Ride.class)));
        var puLocationCount = ridesStream.groupByKey()
                .windowedBy(TimeWindows.ofSizeAndGrace(Duration.ofSeconds(10), Duration.ofSeconds(5)))
                .count().toStream();
        var windowSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10*1000);

        puLocationCount.to("rides-pulocation-window-count", Produced.with(windowSerde, Serdes.Long()));
        return streamsBuilder.build();
    }

    public void countPLocationWindowed() {
        var topology = createTopology();
        var kStreams = new KafkaStreams(topology, props);
        kStreams.start();

        Runtime.getRuntime().addShutdownHook(new Thread(kStreams::close));
    }

    public static void main(String[] args) {
        var object = new JsonKStreamWindow();
        object.countPLocationWindowed();
    }
}
41.983607
192
0.724151
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.streams.StreamsConfig;
import org.example.data.Ride;

import java.io.FileReader;
import java.io.IOException;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

public class JsonProducer {

    private Properties props = new Properties();

    public JsonProducer() {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
    }

    public List<Ride> getRides() throws IOException, CsvException {
        var ridesStream = this.getClass().getResource("/rides.csv");
        var reader = new CSVReader(new FileReader(ridesStream.getFile()));
        reader.skip(1);
        return reader.readAll().stream().map(arr -> new Ride(arr))
                .collect(Collectors.toList());
    }

    public void publishRides(List<Ride> rides) throws ExecutionException, InterruptedException {
        KafkaProducer<String, Ride> kafkaProducer = new KafkaProducer<String, Ride>(props);
        for (Ride ride : rides) {
            ride.tpep_pickup_datetime = LocalDateTime.now().minusMinutes(20);
            ride.tpep_dropoff_datetime = LocalDateTime.now();
            var record = kafkaProducer.send(new ProducerRecord<>("rides", String.valueOf(ride.DOLocationID), ride), (metadata, exception) -> {
                if (exception != null) {
                    System.out.println(exception.getMessage());
                }
            });
            System.out.println(record.get().offset());
            System.out.println(ride.DOLocationID);
            Thread.sleep(500);
        }
    }

    public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
        var producer = new JsonProducer();
        var rides = producer.getRides();
        producer.publishRides(rides);
    }
}
44.278689
192
0.682361
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

import com.opencsv.exceptions.CsvException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.example.data.PickupLocation;

import java.io.IOException;
import java.time.LocalDateTime;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class JsonProducerPickupLocation {

    private Properties props = new Properties();

    public JsonProducerPickupLocation() {
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "pkc-75m1o.europe-west3.gcp.confluent.cloud:9092");
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='"+Secrets.KAFKA_CLUSTER_KEY+"' password='"+Secrets.KAFKA_CLUSTER_SECRET+"';");
        props.put("sasl.mechanism", "PLAIN");
        props.put("client.dns.lookup", "use_all_dns_ips");
        props.put("session.timeout.ms", "45000");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
    }

    public void publish(PickupLocation pickupLocation) throws ExecutionException, InterruptedException {
        KafkaProducer<String, PickupLocation> kafkaProducer = new KafkaProducer<String, PickupLocation>(props);
        var record = kafkaProducer.send(new ProducerRecord<>("rides_location", String.valueOf(pickupLocation.PULocationID), pickupLocation), (metadata, exception) -> {
            if (exception != null) {
                System.out.println(exception.getMessage());
            }
        });
        System.out.println(record.get().offset());
    }

    public static void main(String[] args) throws IOException, CsvException, ExecutionException, InterruptedException {
        var producer = new JsonProducerPickupLocation();
        producer.publish(new PickupLocation(186, LocalDateTime.now()));
    }
}
47.577778
192
0.730892
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

public class Secrets {
    public static final String KAFKA_CLUSTER_KEY = "REPLACE_WITH_YOUR_KAFKA_CLUSTER_KEY";
    public static final String KAFKA_CLUSTER_SECRET = "REPLACE_WITH_YOUR_KAFKA_CLUSTER_SECRET";

    public static final String SCHEMA_REGISTRY_KEY = "REPLACE_WITH_SCHEMA_REGISTRY_KEY";
    public static final String SCHEMA_REGISTRY_SECRET = "REPLACE_WITH_SCHEMA_REGISTRY_SECRET";
}
37.181818
95
0.761337
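Since the Secrets class above holds placeholder credentials, one possible alternative (sketched here as an assumption, not repo code; the environment variable names are invented) is to read the values from environment variables so real keys never land in version control.

package org.example;

// Hypothetical sketch: resolve credentials from environment variables,
// falling back to the placeholder strings used in the course.
public class EnvSecretsSketch {
    private static String env(String name, String fallback) {
        String value = System.getenv(name);
        return value != null ? value : fallback;
    }

    public static final String KAFKA_CLUSTER_KEY = env("KAFKA_CLUSTER_KEY", "REPLACE_WITH_YOUR_KAFKA_CLUSTER_KEY");
    public static final String KAFKA_CLUSTER_SECRET = env("KAFKA_CLUSTER_SECRET", "REPLACE_WITH_YOUR_KAFKA_CLUSTER_SECRET");
    public static final String SCHEMA_REGISTRY_KEY = env("SCHEMA_REGISTRY_KEY", "REPLACE_WITH_SCHEMA_REGISTRY_KEY");
    public static final String SCHEMA_REGISTRY_SECRET = env("SCHEMA_REGISTRY_SECRET", "REPLACE_WITH_SCHEMA_REGISTRY_SECRET");
}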
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example;

public class Topics {
    public static final String INPUT_RIDE_TOPIC = "rides";
    public static final String INPUT_RIDE_LOCATION_TOPIC = "rides_location";
    public static final String OUTPUT_TOPIC = "vendor_info";
}
29.5
76
0.73251
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example.customserdes;

import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.serializers.KafkaJsonDeserializer;
import io.confluent.kafka.serializers.KafkaJsonSerializer;
import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
import org.apache.avro.specific.SpecificRecordBase;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;
import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;

import java.util.HashMap;
import java.util.Map;

public class CustomSerdes {

    public static <T> Serde<T> getSerde(Class<T> classOf) {
        Map<String, Object> serdeProps = new HashMap<>();
        serdeProps.put("json.value.type", classOf);
        final Serializer<T> mySerializer = new KafkaJsonSerializer<>();
        mySerializer.configure(serdeProps, false);

        final Deserializer<T> myDeserializer = new KafkaJsonDeserializer<>();
        myDeserializer.configure(serdeProps, false);
        return Serdes.serdeFrom(mySerializer, myDeserializer);
    }

    public static <T extends SpecificRecordBase> SpecificAvroSerde getAvroSerde(boolean isKey, String schemaRegistryUrl) {
        var serde = new SpecificAvroSerde<T>();

        Map<String, Object> serdeProps = new HashMap<>();
        serdeProps.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
        serde.configure(serdeProps, isKey);
        return serde;
    }
}
37.325581
122
0.763206
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example.data;

import java.time.LocalDateTime;

public class PickupLocation {
    public PickupLocation(long PULocationID, LocalDateTime tpep_pickup_datetime) {
        this.PULocationID = PULocationID;
        this.tpep_pickup_datetime = tpep_pickup_datetime;
    }

    public PickupLocation() {
    }

    public long PULocationID;
    public LocalDateTime tpep_pickup_datetime;
}
22.352941
82
0.724747
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example.data;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class Ride {
    // Builds a Ride from one parsed CSV row of the NYC taxi dataset.
    public Ride(String[] arr) {
        VendorID = arr[0];
        tpep_pickup_datetime = LocalDateTime.parse(arr[1], DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        tpep_dropoff_datetime = LocalDateTime.parse(arr[2], DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        passenger_count = Integer.parseInt(arr[3]);
        trip_distance = Double.parseDouble(arr[4]);
        RatecodeID = Long.parseLong(arr[5]);
        store_and_fwd_flag = arr[6];
        PULocationID = Long.parseLong(arr[7]);
        DOLocationID = Long.parseLong(arr[8]);
        payment_type = arr[9];
        fare_amount = Double.parseDouble(arr[10]);
        extra = Double.parseDouble(arr[11]);
        mta_tax = Double.parseDouble(arr[12]);
        tip_amount = Double.parseDouble(arr[13]);
        tolls_amount = Double.parseDouble(arr[14]);
        improvement_surcharge = Double.parseDouble(arr[15]);
        total_amount = Double.parseDouble(arr[16]);
        congestion_surcharge = Double.parseDouble(arr[17]);
    }

    public Ride() {
    }

    public String VendorID;
    public LocalDateTime tpep_pickup_datetime;
    public LocalDateTime tpep_dropoff_datetime;
    public int passenger_count;
    public double trip_distance;
    public long RatecodeID;
    public String store_and_fwd_flag;
    public long PULocationID;
    public long DOLocationID;
    public String payment_type;
    public double fare_amount;
    public double extra;
    public double mta_tax;
    public double tip_amount;
    public double tolls_amount;
    public double improvement_surcharge;
    public double total_amount;
    public double congestion_surcharge;
}
35.56
112
0.681445
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example.data;

import java.time.LocalDateTime;

public class VendorInfo {

    public VendorInfo(String vendorID, long PULocationID, LocalDateTime pickupTime, LocalDateTime lastDropoffTime) {
        VendorID = vendorID;
        this.PULocationID = PULocationID;
        this.pickupTime = pickupTime;
        this.lastDropoffTime = lastDropoffTime;
    }

    public VendorInfo() {
    }

    public String VendorID;
    public long PULocationID;
    public LocalDateTime pickupTime;
    public LocalDateTime lastDropoffTime;
}
23.590909
116
0.72037
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.internals.Topic; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.*; import org.example.customserdes.CustomSerdes; import org.example.data.PickupLocation; import org.example.data.Ride; import org.example.data.VendorInfo; import org.example.helper.DataGeneratorHelper; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import javax.xml.crypto.Data; import java.util.Properties; import static org.junit.jupiter.api.Assertions.*; class JsonKStreamJoinsTest { private Properties props = new Properties(); private static TopologyTestDriver testDriver; private TestInputTopic<String, Ride> ridesTopic; private TestInputTopic<String, PickupLocation> pickLocationTopic; private TestOutputTopic<String, VendorInfo> outputTopic; private Topology topology = new JsonKStreamJoins().createTopology(); @BeforeEach public void setup() { props = new Properties(); props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application"); props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); if (testDriver != null) { testDriver.close(); } testDriver = new TopologyTestDriver(topology, props); ridesTopic = testDriver.createInputTopic(Topics.INPUT_RIDE_TOPIC, Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer()); pickLocationTopic = testDriver.createInputTopic(Topics.INPUT_RIDE_LOCATION_TOPIC, Serdes.String().serializer(), CustomSerdes.getSerde(PickupLocation.class).serializer()); outputTopic = testDriver.createOutputTopic(Topics.OUTPUT_TOPIC, Serdes.String().deserializer(), CustomSerdes.getSerde(VendorInfo.class).deserializer()); } @Test public void testIfJoinWorksOnSameDropOffPickupLocationId() { Ride ride = DataGeneratorHelper.generateRide(); PickupLocation pickupLocation = DataGeneratorHelper.generatePickUpLocation(ride.DOLocationID); ridesTopic.pipeInput(String.valueOf(ride.DOLocationID), ride); pickLocationTopic.pipeInput(String.valueOf(pickupLocation.PULocationID), pickupLocation); assertEquals(outputTopic.getQueueSize(), 1); var expected = new VendorInfo(ride.VendorID, pickupLocation.PULocationID, pickupLocation.tpep_pickup_datetime, ride.tpep_dropoff_datetime); var result = outputTopic.readKeyValue(); assertEquals(result.key, String.valueOf(ride.DOLocationID)); assertEquals(result.value.VendorID, expected.VendorID); assertEquals(result.value.pickupTime, expected.pickupTime); } @AfterAll public static void shutdown() { testDriver.close(); } }
44.460317
178
0.754803
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.*; import org.example.customserdes.CustomSerdes; import org.example.data.Ride; import org.example.helper.DataGeneratorHelper; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.*; import java.util.Properties; class JsonKStreamTest { private Properties props; private static TopologyTestDriver testDriver; private TestInputTopic<String, Ride> inputTopic; private TestOutputTopic<String, Long> outputTopic; private Topology topology = new JsonKStream().createTopology(); @BeforeEach public void setup() { props = new Properties(); props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testing_count_application"); props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); if (testDriver != null) { testDriver.close(); } testDriver = new TopologyTestDriver(topology, props); inputTopic = testDriver.createInputTopic("rides", Serdes.String().serializer(), CustomSerdes.getSerde(Ride.class).serializer()); outputTopic = testDriver.createOutputTopic("rides-pulocation-count", Serdes.String().deserializer(), Serdes.Long().deserializer()); } @Test public void testIfOneMessageIsPassedToInputTopicWeGetCountOfOne() { Ride ride = DataGeneratorHelper.generateRide(); inputTopic.pipeInput(String.valueOf(ride.DOLocationID), ride); assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride.DOLocationID), 1L)); assertTrue(outputTopic.isEmpty()); } @Test public void testIfTwoMessageArePassedWithDifferentKey() { Ride ride1 = DataGeneratorHelper.generateRide(); ride1.DOLocationID = 100L; inputTopic.pipeInput(String.valueOf(ride1.DOLocationID), ride1); Ride ride2 = DataGeneratorHelper.generateRide(); ride2.DOLocationID = 200L; inputTopic.pipeInput(String.valueOf(ride2.DOLocationID), ride2); assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride1.DOLocationID), 1L)); assertEquals(outputTopic.readKeyValue(), KeyValue.pair(String.valueOf(ride2.DOLocationID), 1L)); assertTrue(outputTopic.isEmpty()); } @Test public void testIfTwoMessageArePassedWithSameKey() { Ride ride1 = DataGeneratorHelper.generateRide(); ride1.DOLocationID = 100L; inputTopic.pipeInput(String.valueOf(ride1.DOLocationID), ride1); Ride ride2 = DataGeneratorHelper.generateRide(); ride2.DOLocationID = 100L; inputTopic.pipeInput(String.valueOf(ride2.DOLocationID), ride2); assertEquals(outputTopic.readKeyValue(), KeyValue.pair("100", 1L)); assertEquals(outputTopic.readKeyValue(), KeyValue.pair("100", 2L)); assertTrue(outputTopic.isEmpty()); } @AfterAll public static void tearDown() { testDriver.close(); } }
37.7375
139
0.715623
data-engineering-zoomcamp
https://github.com/DataTalksClub/data-engineering-zoomcamp
Free Data Engineering course!
15,757
3,602
2023-12-05 01:08:47+00:00
2021-10-21 09:32:50+00:00
1,561
null
Java
package org.example.helper;

import org.example.data.PickupLocation;
import org.example.data.Ride;
import org.example.data.VendorInfo;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;

public class DataGeneratorHelper {
    public static Ride generateRide() {
        var arrivalTime = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        var departureTime = LocalDateTime.now().minusMinutes(30).format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        return new Ride(new String[]{"1", departureTime, arrivalTime, "1", "1.50", "1", "N", "238", "75", "2", "8", "0.5", "0.5", "0", "0", "0.3", "9.3", "0"});
    }

    public static PickupLocation generatePickUpLocation(long pickupLocationId) {
        return new PickupLocation(pickupLocationId, LocalDateTime.now());
    }
}
38
145
0.715286
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import LoadDimensionOperator
from helpers import SqlQueries


def load_dimension_subdag(
        parent_dag_name,
        task_id,
        redshift_conn_id,
        sql_statement,
        delete_load,
        table_name,
        *args, **kwargs):
    dag = DAG(f"{parent_dag_name}.{task_id}", **kwargs)

    # The operator registers itself with the subdag via dag=dag; the subdag
    # contains this single task, so no further wiring is needed.
    LoadDimensionOperator(
        task_id=task_id,
        dag=dag,
        redshift_conn_id=redshift_conn_id,
        sql_query=sql_statement,
        delete_load=delete_load,
        table_name=table_name,
    )

    return dag
23.333333
58
0.657064
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from datetime import datetime, timedelta import os from airflow import DAG from airflow.operators.dummy_operator import DummyOperator from airflow.operators import ( CreateTableOperator, StageToRedshiftOperator, LoadFactOperator, LoadDimensionOperator, DataQualityOperator) from helpers import SqlQueries from sparkify_dimension_subdag import load_dimension_subdag from airflow.operators.subdag_operator import SubDagOperator #AWS_KEY = os.environ.get('AWS_KEY') #AWS_SECRET = os.environ.get('AWS_SECRET') s3_bucket = 'udacity-dend-warehouse' song_s3_key = "song_data" log_s3_key = "log-data" log_json_file = "log_json_path.json" default_args = { 'owner': 'udacity', 'depends_on_past': True, 'start_date': datetime(2019, 1, 12), 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(minutes=5), 'catchup': True } dag_name = 'udac_example_dag' dag = DAG(dag_name, default_args=default_args, description='Load and transform data in Redshift with Airflow', schedule_interval='0 * * * *', max_active_runs = 1 ) start_operator = DummyOperator(task_id='Begin_execution', dag=dag) create_tables_in_redshift = CreateTableOperator( task_id = 'create_tables_in_redshift', redshift_conn_id = 'redshift', dag = dag ) stage_events_to_redshift = StageToRedshiftOperator( task_id='Stage_events', table_name="staging_events", s3_bucket = s3_bucket, s3_key = log_s3_key, file_format="JSON", log_json_file = log_json_file, redshift_conn_id = "redshift", aws_credential_id="aws_credentials", dag=dag, provide_context=True ) stage_songs_to_redshift = StageToRedshiftOperator( task_id='Stage_songs', table_name="staging_songs", s3_bucket = s3_bucket, s3_key = song_s3_key, file_format="JSON", redshift_conn_id = "redshift", aws_credential_id="aws_credentials", dag=dag, provide_context=True ) load_songplays_table = LoadFactOperator( task_id='Load_songplays_fact_table', redshift_conn_id = 'redshift', sql_query = SqlQueries.songplay_table_insert, dag=dag ) load_user_dimension_table = SubDagOperator( subdag=load_dimension_subdag( parent_dag_name=dag_name, task_id="Load_user_dim_table", redshift_conn_id="redshift", start_date=default_args['start_date'], sql_statement=SqlQueries.user_table_insert, delete_load = True, table_name = "users", ), task_id="Load_user_dim_table", dag=dag, ) load_song_dimension_table = SubDagOperator( subdag=load_dimension_subdag( parent_dag_name=dag_name, task_id="Load_song_dim_table", redshift_conn_id="redshift", start_date=default_args['start_date'], sql_statement=SqlQueries.song_table_insert, delete_load = True, table_name = "songs", ), task_id="Load_song_dim_table", dag=dag, ) load_artist_dimension_table = SubDagOperator( subdag=load_dimension_subdag( parent_dag_name=dag_name, task_id="Load_artist_dim_table", redshift_conn_id="redshift", start_date=default_args['start_date'], sql_statement=SqlQueries.artist_table_insert, delete_load = True, table_name = "artists", ), task_id="Load_artist_dim_table", dag=dag, ) load_time_dimension_table = SubDagOperator( subdag=load_dimension_subdag( parent_dag_name=dag_name, task_id="Load_time_dim_table", redshift_conn_id="redshift", start_date=default_args['start_date'], sql_statement=SqlQueries.time_table_insert, delete_load = True, table_name = "time", ), task_id="Load_time_dim_table", dag=dag, ) run_quality_checks = DataQualityOperator( task_id='Run_data_quality_checks', dag=dag, redshift_conn_id = "redshift", tables = ["artists", "songplays", "songs", "time", "users"] ) end_operator = DummyOperator(task_id='Stop_execution', dag=dag) 
start_operator >> create_tables_in_redshift create_tables_in_redshift >> [stage_songs_to_redshift, stage_events_to_redshift] >> load_songplays_table load_songplays_table >> [load_user_dimension_table, load_song_dimension_table, load_artist_dimension_table, load_time_dimension_table] >> run_quality_checks >> end_operator
27.06962
172
0.657871
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
0
0
0
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import configparser # CONFIG config = configparser.ConfigParser() config.read('dwh.cfg') # DROP TABLES staging_events_table_drop = "DROP TABle IF EXISTS staging_events;" staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;" songplay_table_drop = "DROP TABLE IF EXISTS songplays;" user_table_drop = "DROP TABLE IF EXISTS users;" song_table_drop = "DROP TABLE IF EXISTS songs;" artist_table_drop = "DROP TABLE IF EXISTS artists;" time_table_drop = "DROP TABLE IF EXISTS time;" # CREATE TABLES staging_events_table_create= (""" CREATE TABLE IF NOT EXISTS staging_events ( artist VARCHAR, auth VARCHAR, firstName VARCHAR(50), gender CHAR, itemInSession INTEGER, lastName VARCHAR(50), length FLOAT, level VARCHAR, location VARCHAR, method VARCHAR, page VARCHAR, registration FLOAT, sessionId INTEGER, song VARCHAR, status INTEGER, ts BIGINT, userAgent VARCHAR, userId INTEGER ); """) staging_songs_table_create = (""" CREATE TABLE IF NOT EXISTS staging_songs ( num_songs INTEGER, artist_id VARCHAR, artist_latitude FLOAT, artist_longitude FLOAT, artist_location VARCHAR, artist_name VARCHAR, song_id VARCHAR, title VARCHAR, duration FLOAT, year FLOAT ); """) songplay_table_create = (""" CREATE TABLE IF NOT EXISTS songplays ( songplay_id INTEGER IDENTITY (1, 1) PRIMARY KEY , start_time TIMESTAMP, user_id INTEGER, level VARCHAR, song_id VARCHAR, artist_id VARCHAR, session_id INTEGER, location VARCHAR, user_agent VARCHAR ) DISTSTYLE KEY DISTKEY ( start_time ) SORTKEY ( start_time ); """) user_table_create = (""" CREATE TABLE IF NOT EXISTS users ( userId INTEGER PRIMARY KEY, firsname VARCHAR(50), lastname VARCHAR(50), gender CHAR(1) ENCODE BYTEDICT, level VARCHAR ENCODE BYTEDICT ) SORTKEY (userId); """) song_table_create = (""" CREATE TABLE IF NOT EXISTS songs ( song_id VARCHAR PRIMARY KEY, title VARCHAR, artist_id VARCHAR, year INTEGER ENCODE BYTEDICT, duration FLOAT ) SORTKEY (song_id); """) artist_table_create = (""" CREATE TABLE IF NOT EXISTS artists ( artist_id VARCHAR PRIMARY KEY , name VARCHAR, location VARCHAR, latitude FLOAT, longitude FLOAT ) SORTKEY (artist_id); """) time_table_create = (""" CREATE TABLE IF NOT EXISTS time ( start_time TIMESTAMP PRIMARY KEY , hour INTEGER, day INTEGER, week INTEGER, month INTEGER, year INTEGER ENCODE BYTEDICT , weekday VARCHAR(9) ENCODE BYTEDICT ) DISTSTYLE KEY DISTKEY ( start_time ) SORTKEY (start_time); """) # STAGING TABLES staging_events_copy = (""" COPY staging_events FROM {} iam_role {} FORMAT AS json {}; """).format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config['S3']['LOG_JSONPATH']) staging_songs_copy = (""" COPY staging_songs FROM {} iam_role {} FORMAT AS json 'auto'; """).format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN']) # FINAL TABLES songplay_table_insert = (""" INSERT INTO songplays (START_TIME, USER_ID, LEVEL, SONG_ID, ARTIST_ID, SESSION_ID, LOCATION, USER_AGENT) SELECT DISTINCT TIMESTAMP 'epoch' + (se.ts / 1000) * INTERVAL '1 second' as start_time, se.userId, se.level, ss.song_id, ss.artist_id, se.sessionId, se.location, se.userAgent FROM staging_songs ss INNER JOIN staging_events se ON (ss.title = se.song AND se.artist = ss.artist_name) AND se.page = 'NextSong'; """) user_table_insert = (""" INSERT INTO users SELECT DISTINCT userId, firstName, lastName, gender, level FROM staging_events WHERE userId IS NOT NULL AND page = 'NextSong'; """) song_table_insert = (""" INSERT INTO songs SELECT DISTINCT song_id, title, artist_id, year, duration FROM staging_songs WHERE song_id IS NOT NULL; """) artist_table_insert = (""" INSERT 
INTO artists SELECT DISTINCT artist_id, artist_name, artist_location, artist_latitude, artist_longitude FROM staging_songs; """) time_table_insert = (""" insert into time SELECT DISTINCT TIMESTAMP 'epoch' + (ts/1000) * INTERVAL '1 second' as start_time, EXTRACT(HOUR FROM start_time) AS hour, EXTRACT(DAY FROM start_time) AS day, EXTRACT(WEEKS FROM start_time) AS week, EXTRACT(MONTH FROM start_time) AS month, EXTRACT(YEAR FROM start_time) AS year, to_char(start_time, 'Day') AS weekday FROM staging_events; """) # QUERY LISTS create_table_queries = [staging_events_table_create, staging_songs_table_create, songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create] drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop] copy_table_queries = [staging_events_copy, staging_songs_copy] insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
23.429952
181
0.68038
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class CreateTableOperator(BaseOperator):

    ui_color = '#358140'

    @apply_defaults
    def __init__(self, redshift_conn_id="", *args, **kwargs):
        super(CreateTableOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id

    def execute(self, context):
        self.log.info('Creating Postgres SQL Hook')
        redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)

        self.log.info('Creating tables in Redshift.')
        # Read the DDL file with a context manager so the handle is closed.
        with open('/home/workspace/airflow/create_tables.sql', 'r') as ddl_file:
            queries = ddl_file.read()
        redshift.run(queries)

        self.log.info("Tables created")
26.7
80
0.651807
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class DataQualityOperator(BaseOperator):

    ui_color = '#89DA59'

    @apply_defaults
    def __init__(self, redshift_conn_id="", tables=[], *args, **kwargs):
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.tables = tables

    def execute(self, context):
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        for table in self.tables:
            self.log.info(f"Starting data quality validation on table : {table}")
            records = redshift_hook.get_records(f"select count(*) from {table};")

            # Fail the task if the query returned nothing or the table is empty.
            if len(records) < 1 or len(records[0]) < 1 or records[0][0] < 1:
                self.log.error(f"Data Quality validation failed for table : {table}.")
                raise ValueError(f"Data Quality validation failed for table : {table}")
            self.log.info(f"Data Quality Validation Passed on table : {table}!!!")
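The row-count rule inside execute() is easy to misread, so here is a small standalone sketch of the same condition; the has_rows helper and the sample values are illustrative only and not part of the repository. PostgresHook.get_records returns a list of row tuples, e.g. [(42,)] for a COUNT(*) query.

# Standalone sketch of the validation rule applied in execute() above.
# `has_rows` is a hypothetical helper used only for this illustration.
def has_rows(records):
    # Fails when no result set came back, the first row is empty, or the count is zero.
    return not (len(records) < 1 or len(records[0]) < 1 or records[0][0] < 1)

assert has_rows([(42,)]) is True    # populated table passes
assert has_rows([(0,)]) is False    # empty table fails
assert has_rows([]) is False        # missing result fails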
36.823529
94
0.595331
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class LoadDimensionOperator(BaseOperator):

    ui_color = '#80BD9E'

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 sql_query="",
                 delete_load=False,
                 table_name="",
                 *args, **kwargs):
        super(LoadDimensionOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.sql_query = sql_query
        self.table_name = table_name
        self.delete_load = delete_load

    def execute(self, context):
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        if self.delete_load:
            self.log.info(f"Delete load operation set to TRUE. Running delete statement on table {self.table_name}")
            redshift_hook.run(f"DELETE FROM {self.table_name}")
        self.log.info(f"Running query to load data into Dimension Table {self.table_name}")
        redshift_hook.run(self.sql_query)
        self.log.info(f"Dimension Table {self.table_name} loaded.")
36.65625
116
0.622924
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class LoadFactOperator(BaseOperator):

    ui_color = '#F98866'

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 sql_query="",
                 *args, **kwargs):
        super(LoadFactOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.sql_query = sql_query

    def execute(self, context):
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        redshift_hook.run(self.sql_query)
27.416667
78
0.621145
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_hook import AwsHook


class StageToRedshiftOperator(BaseOperator):

    ui_color = '#358140'

    copy_query = " COPY {} \
                   FROM '{}' \
                   ACCESS_KEY_ID '{}' \
                   SECRET_ACCESS_KEY '{}' \
                   FORMAT AS json '{}'; \
                 "

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 aws_credential_id="",
                 table_name="",
                 s3_bucket="",
                 s3_key="",
                 file_format="",
                 log_json_file="",
                 *args, **kwargs):
        super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.aws_credential_id = aws_credential_id
        self.table_name = table_name
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.file_format = file_format
        self.log_json_file = log_json_file
        self.execution_date = kwargs.get('execution_date')

    def execute(self, context):
        aws_hook = AwsHook(self.aws_credential_id)
        credentials = aws_hook.get_credentials()

        s3_path = "s3://{}/{}".format(self.s3_bucket, self.s3_key)
        self.log.info(f"Picking staging file for table {self.table_name} from location : {s3_path}")

        if self.log_json_file != "":
            self.log_json_file = "s3://{}/{}".format(self.s3_bucket, self.log_json_file)
            copy_query = self.copy_query.format(self.table_name, s3_path, credentials.access_key, credentials.secret_key, self.log_json_file)
        else:
            copy_query = self.copy_query.format(self.table_name, s3_path, credentials.access_key, credentials.secret_key, 'auto')

        self.log.info(f"Running copy query : {copy_query}")
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        redshift_hook.run(copy_query)
        self.log.info(f"Table {self.table_name} staged successfully!!")
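For reference, a small sketch of what the COPY template above renders to for the staging_events load; the bucket and keys mirror the DAG configuration earlier in this repository (udacity-dend-warehouse, log-data, log_json_path.json), while the two credential strings are placeholders, not real values.

# Sketch only: render the operator's COPY template the way execute() does.
copy_template = ("COPY {} FROM '{}' ACCESS_KEY_ID '{}' "
                 "SECRET_ACCESS_KEY '{}' FORMAT AS json '{}';")
print(copy_template.format(
    "staging_events",
    "s3://udacity-dend-warehouse/log-data",
    "AKIA_PLACEHOLDER_KEY",
    "PLACEHOLDER_SECRET",
    "s3://udacity-dend-warehouse/log_json_path.json",
))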
37.280702
141
0.577258
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import configparser
from pathlib import Path

config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))

api_key = config['KEYS']['API_KEY']
headers = {'Authorization': 'Bearer %s' % api_key}
28.625
65
0.711864
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
# This is request module of this project
from request import Request
from auth import headers
import json


class BusinessSearch:
    def __init__(self, term, location, price=None):
        self._param = {'term': term, 'location': location}
        if price:
            self._param['price'] = price
        self._base_url = 'https://api.yelp.com/v3/businesses/search'
        self._business_list = self._search_business()

    def _search_business(self):
        business_search_request = Request.get_content(url=self._base_url, param=self._param)
        return business_search_request['businesses'] if business_search_request is not None else []

    def _parse_results(self, data):
        # Categories data : 'categories': [{'alias': 'bakeries', 'title': 'Bakeries'}]
        categories = ' '.join([category['title'] for category in data['categories']])

        # Longitude and latitude data : 'coordinates': {'latitude': 45.5232, 'longitude': -73.583459}
        longitude = data['coordinates']['longitude']
        latitude = data['coordinates']['latitude']

        # Location example : 'location': { 'display_address': ['316 Avenue du Mont-Royal E', 'Montreal, QC H2T 1P7', 'Canada']}
        location = ','.join(data['location']['display_address'])

        return {"id": data['id'],
                "name": self._add_escape_character(data['name']),
                "image_url": data['image_url'],
                "url": data['url'],
                "review_count": data['review_count'],
                "categories": categories,
                "rating": data['rating'],
                "latitude": latitude,
                "longitude": longitude,
                "price": data['price'],
                "location": location,
                "display_phone": data['display_phone']
                }

    def _add_escape_character(self, data):
        return data.replace("'", "''")

    def get_results(self):
        return [self._parse_results(business) for business in self._business_list]
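A minimal usage sketch for the BusinessSearch class above (not part of the original file); it assumes config.cfg carries a valid Yelp API key so that the underlying request/auth modules can authenticate, and the term/location values are arbitrary examples.

from businesssearch import BusinessSearch

# Search for pizza places in Montreal at the "$$" price level.
search = BusinessSearch(term="pizza", location="Montreal", price=2)

for business in search.get_results():
    # Each entry is the flattened dict produced by _parse_results().
    print(business["name"], business["rating"], business["location"])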
47.512821
139
0.615547
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import psycopg2
import configparser
from pathlib import Path
from queries import create_business_schema, create_business_table

config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))


class DatabaseDriver:
    def __init__(self):
        self._conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['DATABASE'].values()))
        self._cur = self._conn.cursor()

    def execute_query(self, query):
        self._cur.execute(query)

    def setup(self):
        self.execute_query(create_business_schema)
        self.execute_query(create_business_table)
30.8
123
0.685039
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import configparser from pathlib import Path from businesssearch import BusinessSearch from queries import create_business_schema, create_business_table, insert_business_table from databasedriver import DatabaseDriver import argparse config = configparser.ConfigParser() config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg")) parser = argparse.ArgumentParser( description="A Example yelp business finder based on parameters such as term, location, price, ") api_key = config['KEYS']['API_KEY'] headers = {'Authorization': 'Bearer %s' % api_key} def to_string(data): return [str(value) for value in data.values()] def main(): args = parser.parse_args() # Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$. b = BusinessSearch(term=args.term, location=args.location, price=args.price) db = DatabaseDriver() db.setup() queries = [insert_business_table.format(*to_string(result)) for result in b.get_results()] query_to_execute = "BEGIN; \n" + '\n'.join(queries) + "\nCOMMIT;" db.execute_query(query_to_execute) if __name__ == "__main__": parser._action_groups.pop() required = parser.add_argument_group('required arguments') optional = parser.add_argument_group('optional arguments') required.add_argument("-t", "--term", metavar='', required=True, help="Search term, for example \"food\" or \"restaurants\". The term may also be business names, such as \"Starbucks.\".") required.add_argument("-l", "--location", metavar='', required=True, help="This string indicates the geographic area to be used when searching for businesses. ") optional.add_argument("-p", "--price", type=int, metavar='', required=False, default=1, help="Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.") main()
44.690476
148
0.657977
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
create_business_schema = """CREATE SCHEMA IF NOT EXISTS yelp;""" create_business_table = """ CREATE TABLE IF NOT EXISTS yelp.business ( business_id varchar PRIMARY KEY, business_name varchar, image_url varchar, url varchar, review_count int, categories varchar, rating float, latitude float, longitude float, price varchar, location varchar, phone varchar ); """ insert_business_table = """INSERT INTO yelp.business VALUES ('{}', '{}', '{}', '{}', {}, '{}', {}, {}, {}, '{}', '{}', '{}') ON CONFLICT (business_id) DO UPDATE SET business_id = EXCLUDED.business_id, business_name = EXCLUDED.business_name, image_url = EXCLUDED.image_url, url = EXCLUDED.url, review_count = EXCLUDED.review_count, categories = EXCLUDED.categories, rating = EXCLUDED.rating, latitude = EXCLUDED.latitude, longitude = EXCLUDED.longitude, price = EXCLUDED.price, location = EXCLUDED.location, phone = EXCLUDED.phone; """
35.371429
124
0.505503
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import requests
from auth import headers
import json


class Request:
    def __init__(self):
        self._header = headers

    @staticmethod
    def get_content(url, param):
        response = requests.get(url, headers=headers, params=param)
        if response.status_code == 200:
            return json.loads(response.content)
        else:
            print(f"Request completed with Error. Response Code : {response.status_code}")
            return None
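An illustrative call to Request.get_content above (not part of the original file), using the same Yelp search endpoint and parameter shape that BusinessSearch passes in; a valid API key in config.cfg is assumed for a 200 response.

from request import Request

payload = Request.get_content(
    url="https://api.yelp.com/v3/businesses/search",
    param={"term": "coffee", "location": "Montreal"},
)

# get_content returns the parsed JSON body on HTTP 200, otherwise None.
if payload is not None:
    print(len(payload["businesses"]), "businesses returned")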
27.875
90
0.635575
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import configparser
import psycopg2
from sql_queries import copy_table_queries, insert_table_queries


def load_staging_tables(cur, conn):
    for query in copy_table_queries:
        cur.execute(query)
        conn.commit()


def insert_tables(cur, conn):
    for query in insert_table_queries:
        cur.execute(query)
        conn.commit()


def main():
    config = configparser.ConfigParser()
    config.read('dwh.cfg')

    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    cur = conn.cursor()

    load_staging_tables(cur, conn)
    insert_tables(cur, conn)

    conn.close()


if __name__ == "__main__":
    main()
20.625
112
0.638205
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries


def drop_tables(cur, conn):
    for query in drop_table_queries:
        cur.execute(query)
        conn.commit()


def create_tables(cur, conn):
    for query in create_table_queries:
        cur.execute(query)
        conn.commit()


def main():
    config = configparser.ConfigParser()
    config.read('dwh.cfg')

    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    cur = conn.cursor()

    drop_tables(cur, conn)
    create_tables(cur, conn)

    conn.close()


if __name__ == "__main__":
    main()
20
112
0.636364
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
from create_tables import main as create_table_main
from etl import main as etl_main


if __name__ == "__main__":
    create_table_main()
    etl_main()
20.857143
51
0.664474
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import boto3 import configparser from botocore.exceptions import ClientError import json import logging import logging.config from pathlib import Path import argparse import time # Setting up logger, Logger properties are defined in logging.ini file logging.config.fileConfig(f"{Path(__file__).parents[0]}/logging.ini") logger = logging.getLogger(__name__) # Loading cluster configurations from cluster.config config = configparser.ConfigParser() config.read_file(open('cluster.config')) def create_IAM_role(iam_client): """ Create and IAM_role, Define configuration in cluster.config :param iam_client: an IAM service client instance :return: True if IAM role created and policy applied successfully. """ role_name = config.get('IAM_ROLE', 'NAME') role_description = config.get('IAM_ROLE', 'DESCRIPTION') role_policy_arn = config.get('IAM_ROLE','POLICY_ARN') logging.info(f"Creating IAM role with name : {role_name}, description : {role_description} and policy : {role_policy_arn}") # Creating Role. # Policy Documentation reference - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#aws-resource-iam-role--examples role_policy_document = json.dumps( { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": [ "redshift.amazonaws.com" ] }, "Action": [ "sts:AssumeRole" ] } ] } ) try: create_response = iam_client.create_role( Path='/', RoleName=role_name, Description=role_description, AssumeRolePolicyDocument = role_policy_document ) logger.debug(f"Got response from IAM client for creating role : {create_response}") logger.info(f"Role create response code : {create_response['ResponseMetadata']['HTTPStatusCode']}") except Exception as e: logger.error(f"Error occured while creating role : {e}") return False try: # Attaching policy using ARN's( Amazon Resource Names ) policy_response = iam_client.attach_role_policy( RoleName=role_name, PolicyArn=role_policy_arn ) logger.debug(f"Got response from IAM client for applying policy to role : {policy_response}") logger.info(f"Attach policy response code : {policy_response['ResponseMetadata']['HTTPStatusCode']}") except Exception as e: logger.error(f"Error occured while applying policy : {e}") return False return True if( (create_response['ResponseMetadata']['HTTPStatusCode'] == 200) and (policy_response['ResponseMetadata']['HTTPStatusCode'] == 200) ) else False def delete_IAM_role(iam_client): """ Delete and IAM role Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance. :param iam_client: an IAM service client instance :return: True if role deleted successfully. 
""" role_name = config.get('IAM_ROLE', 'NAME') existing_roles = [role['RoleName'] for role in iam_client.list_roles()['Roles']] if(role_name not in existing_roles): logger.info(f"Role {role_name} does not exist.") return True logger.info(f"Processing deleting IAM role : {role_name}") try: detach_response = iam_client.detach_role_policy(RoleName=role_name, PolicyArn=config.get('IAM_ROLE','POLICY_ARN')) logger.debug(f"Response for policy detach from IAM role : {detach_response}") logger.info(f"Detach policy response code : {detach_response['ResponseMetadata']['HTTPStatusCode']}") delete_response = iam_client.delete_role(RoleName=role_name) logger.debug(f"Response for deleting IAM role : {delete_response}") logger.info(f"Delete role response code : {delete_response['ResponseMetadata']['HTTPStatusCode']}") except Exception as e: logger.error(f"Exception occured while deleting role : {e}") return False return True if( (detach_response['ResponseMetadata']['HTTPStatusCode'] == 200) and (delete_response['ResponseMetadata']['HTTPStatusCode'] == 200) ) else False def create_cluster(redshift_client, iam_role_arn, vpc_security_group_id): """ Create a Redshift cluster using the IAM role and security group created. :param redshift_client: a redshift client instance :param iam_role_arn: IAM role arn to give permission to cluster to communicate with other AWS service :param vpc_security_group_id: vpc group for network setting for cluster :return: True if cluster created successfully. """ # Cluster Hardware config cluster_type = config.get('DWH','DWH_CLUSTER_TYPE') node_type = config.get('DWH', 'DWH_NODE_TYPE') num_nodes = int(config.get('DWH', 'DWH_NUM_NODES')) # Cluster identifiers and credentials cluster_identifier = config.get('DWH','DWH_CLUSTER_IDENTIFIER') db_name = config.get('DWH', 'DWH_DB') database_port=int(config.get('DWH','DWH_PORT')) master_username = config.get('DWH', 'DWH_DB_USER') master_user_password = config.get('DWH', 'DWH_DB_PASSWORD') # Cluster adding IAM role iam_role = None # Security settings security_group = config.get('SECURITY_GROUP', 'NAME') # Documentation - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html?highlight=create_cluster#Redshift.Client.create_cluster try: response = redshift_client.create_cluster( DBName=db_name, ClusterIdentifier=cluster_identifier, ClusterType=cluster_type, NodeType=node_type, NumberOfNodes=num_nodes, MasterUsername=master_username, MasterUserPassword=master_user_password, VpcSecurityGroupIds=vpc_security_group_id, IamRoles = [iam_role_arn] ) logger.debug(f"Cluster creation response : {response}") logger.info(f"Cluster creation response code : {response['ResponseMetadata']['HTTPStatusCode']} ") except Exception as e: logger.error(f"Exception occured while creating cluster : {e}") return False return (response['ResponseMetadata']['HTTPStatusCode'] == 200) def get_cluster_status(redshift_client, cluster_identifier): response = redshift_client.describe_clusters(ClusterIdentifier = cluster_identifier) cluster_status = response['Clusters'][0]['ClusterStatus'] logger.info(f"Cluster status : {cluster_status.upper()}") return True if(cluster_status.upper() in ('AVAILABLE','ACTIVE', 'INCOMPATIBLE_NETWORK', 'INCOMPATIBLE_HSM', 'INCOMPATIBLE_RESTORE', 'INSUFFICIENT_CAPACITY', 'HARDWARE_FAILURE')) else False def delete_cluster(redshift_client): """ Deleting the redshift cluster :param redshift_client: a redshift client instance :return: True if cluster deleted successfully. 
""" cluster_identifier = config.get('DWH', 'DWH_CLUSTER_IDENTIFIER') if(len(redshift_client.describe_clusters()['Clusters']) == 0): logger.info(f"Cluster {cluster_identifier} does not exist.") return True try: while(not get_cluster_status(redshift_client, cluster_identifier=cluster_identifier)): logger.info("Can't delete cluster. Waiting for cluster to become ACTIVE") time.sleep(10) response = \ redshift_client.delete_cluster(ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=True) logger.debug(f"Cluster deleted with response : {response}") logger.info(f"Cluster deleted response code : {response['ResponseMetadata']['HTTPStatusCode']}") except Exception as e: logger.error(f"Exception occured while deleting cluster : {e}") return False return response['ResponseMetadata']['HTTPStatusCode'] def get_group(ec2_client, group_name): groups = \ ec2_client.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': [group_name]}])[ 'SecurityGroups'] return None if(len(groups) == 0) else groups[0] def create_ec2_security_group(ec2_client): if(get_group(ec2_client, config.get('SECURITY_GROUP','NAME')) is not None): logger.info("Group already exists!!") return True # Fetch VPC ID vpc_id = ec2_client.describe_security_groups()['SecurityGroups'][0]['VpcId'] response = ec2_client.create_security_group( Description=config.get('SECURITY_GROUP','DESCRIPTION'), GroupName=config.get('SECURITY_GROUP','NAME'), VpcId=vpc_id, DryRun=False # Checks whether you have the required permissions for the action, without actually making the request, and provides an error response ) logger.debug(f"Security group creation response : {response}") logger.info(f"Group created!! Response code {response['ResponseMetadata']['HTTPStatusCode']}") logger.info("Authorizing security group ingress") ec2_client.authorize_security_group_ingress( GroupId=response['GroupId'], GroupName=config.get('SECURITY_GROUP','NAME'), FromPort=int(config.get('INBOUND_RULE','PORT_RANGE')), ToPort=int(config.get('INBOUND_RULE', 'PORT_RANGE')), CidrIp=config.get('INBOUND_RULE','CIDRIP'), IpProtocol=config.get('INBOUND_RULE','PROTOCOL'), DryRun=False ) return (response['ResponseMetadata']['HTTPStatusCode'] == 200) def delete_ec2_security_group(ec2_client): """ Delete a security group :param ec2_client: ec2 client instance :return: True if security group deleted successfully """ group_name = config.get('SECURITY_GROUP','NAME') group = get_group(ec2_client, group_name) if(group is None): logger.info(f"Group {group_name} does not exist") return True try: response = ec2_client.delete_security_group( GroupId=group['GroupId'], GroupName=group_name, DryRun=False ) logger.debug(f"Deleting security group response : {response}") logger.info(f"Delete response {response['ResponseMetadata']['HTTPStatusCode']}") except Exception as e: logger.error(f"Error occured while deleting group : {e}") return False return (response['ResponseMetadata']['HTTPStatusCode'] == 200) def boolean_parser(val): if val.upper() not in ['FALSE', 'TRUE']: logging.error(f"Invalid arguemnt : {val}. Must be TRUE or FALSE") raise ValueError('Not a valid boolean string') return val.upper() == 'TRUE' if __name__ == "__main__": # Parsing arguments parser = argparse.ArgumentParser(description="A Redshift cluster IaC (Infrastructure as Code). It creates IAM role for the Redshift, creates security group and sets up ingress parameters." 
" Finally spin-up a redshift cluster.") parser._action_groups.pop() required = parser.add_argument_group('required arguments') optional = parser.add_argument_group('optional arguments') required.add_argument("-c", "--create", type=boolean_parser, metavar='', required=True, help="True or False. Create IAM roles, security group and redshift cluster if ie does not exist.") required.add_argument("-d", "--delete", type=boolean_parser, metavar='', required=True, help="True or False. Delete the roles, securitygroup and cluster. WARNING: Deletes the Redshift cluster, IAM role and security group. ") optional.add_argument("-v", "--verbosity", type=boolean_parser, metavar='', required=False, default=True, help="Increase output verbosity. Default set to DEBUG.") args = parser.parse_args() logger.info(f"ARGS : {args}") if(not args.verbosity): logger.setLevel(logging.INFO) logger.info("LOGGING LEVEL SET TO INFO.") # print(boto3._get_default_session().get_available_services() ) # Getting aws services list # Creating low-level service clients ec2 = boto3.client(service_name = 'ec2', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET')) s3 = boto3.client(service_name = 's3', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET')) iam = boto3.client(service_name = 'iam', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET')) redshift = boto3.client(service_name = 'redshift', region_name = 'us-east-1', aws_access_key_id=config.get('AWS', 'Key'), aws_secret_access_key=config.get('AWS', 'SECRET')) logger.info("Clients setup for all services.") # Setting up IAM Role, security group and cluster if(args.create): if(create_IAM_role(iam)): logger.info("IAM role created. Creating security group....") if(create_ec2_security_group(ec2)): logger.info("Security group created. Spinning redshift cluster....") role_arn = iam.get_role(RoleName = config.get('IAM_ROLE', 'NAME'))['Role']['Arn'] vpc_security_group_id = get_group(ec2, config.get('SECURITY_GROUP', 'NAME'))['GroupId'] create_cluster(redshift, role_arn, [vpc_security_group_id]) else: logger.error("Failed to create security group") else: logger.error("Failed to create IAM role") else: logger.info("Skipping Creation.") # cleanup if(args.delete): delete_cluster(redshift) delete_ec2_security_group(ec2) delete_IAM_role(iam)
42.537267
192
0.657868
Udacity-Data-Engineering-Projects
https://github.com/san089/Udacity-Data-Engineering-Projects
Few projects related to Data Engineering including Data Modeling, Infrastructure setup on cloud, Data Warehousing and Data Lake development.
1,219
433
2023-12-04 20:08:27+00:00
2020-01-20 22:50:03+00:00
2,128
Other
Python
import psycopg2
import configparser

# Loading cluster configurations from cluster.config
config = configparser.ConfigParser()
config.read_file(open('cluster.config'))


def test_connection(host):
    dbname = config.get('DWH', 'DWH_DB')
    port = config.get('DWH', 'DWH_PORT')
    user = config.get('DWH', 'DWH_DB_USER')
    password = config.get('DWH', 'DWH_DB_PASSWORD')
    con = psycopg2.connect(dbname=dbname, host=host, port=port, user=user, password=password)

    cur = con.cursor()

    cur.execute("CREATE TABLE test (id int);")
    cur.execute("INSERT INTO test VALUES (10);")
    print(cur.execute('SELECT * FROM test'))

    con.close()
28.590909
95
0.683077
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
0
0
0
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import json from concurrent.futures import ThreadPoolExecutor from retry import RetryOnException as retry from proxypool import ( ProxyPoolValidator, ProxyPoolScraper, RedisProxyPoolClient ) from airflow.models.baseoperator import BaseOperator from airflow.utils.decorators import apply_defaults class ProxyPoolOperator(BaseOperator): @apply_defaults def __init__( self, proxy_webpage, number_of_proxies, testing_url, max_workers, redis_config, redis_key, *args, **kwargs): super().__init__(*args, **kwargs) self.proxy_webpage = proxy_webpage self.testing_url = testing_url self.number_of_proxies = number_of_proxies self.max_workers = max_workers self.redis_config = redis_config self.redis_key = redis_key @retry(5) def execute(self, context): proxy_scraper = ProxyPoolScraper(self.proxy_webpage) proxy_validator = ProxyPoolValidator(self.testing_url) proxy_stream = proxy_scraper.get_proxy_stream(self.number_of_proxies) with ThreadPoolExecutor(max_workers=self.max_workers) as executor: results = executor.map( proxy_validator.validate_proxy, proxy_stream ) valid_proxies = filter(lambda x: x.is_valid is True, results) sorted_valid_proxies = sorted( valid_proxies, key=lambda x: x.health, reverse=True ) with RedisProxyPoolClient(self.redis_key, self.redis_config) as client: client.override_existing_proxies( [ json.dumps(record.proxy) for record in sorted_valid_proxies[:5] ] )
31.160714
79
0.611111
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from log import log from retry import RetryOnException as retry from proxypool import RedisProxyPoolClient from rss_news import ( NewsProducer, NewsExporter, NewsValidator ) from airflow.models.baseoperator import BaseOperator from airflow.utils.decorators import apply_defaults @log class RSSNewsOperator(BaseOperator): @apply_defaults def __init__( self, validator_config, rss_feed, language, redis_config, redis_key, bootstrap_servers, topic, *args, **kwargs): super().__init__(*args, **kwargs) self.validator_config = validator_config self.rss_feed = rss_feed self.language = language self.redis_config = redis_config self.redis_key = redis_key self.bootstrap_servers = bootstrap_servers self.topic = topic @retry(5) def execute(self, context): validator = NewsValidator(self.validator_config) producer = NewsProducer(self.rss_feed, self.language) redis = RedisProxyPoolClient(self.redis_key, self.redis_config) with NewsExporter(self.bootstrap_servers) as exporter: proxy = redis.get_proxy() self.logger.info(proxy) try: for news in producer.get_news_stream(proxy): self.logger.info(news) validator.validate_news(news) exporter.export_news_to_broker( self.topic, news.as_dict() ) except Exception as err: redis.lpop_proxy() self.logger.error(f"Exception: {err}") raise err
29.293103
71
0.573462
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
class Config: PROXY_WEBPAGE = "https://free-proxy-list.net/" TESTING_URL = "https://google.com" REDIS_CONFIG = { "host": "redis", "port": "6379", "db": 0 } REDIS_KEY = "proxies" MAX_WORKERS = 50 NUMBER_OF_PROXIES = 50 RSS_FEEDS = { "en": [ "https://www.goal.com/feeds/en/news", "https://www.eyefootball.com/football_news.xml", "https://www.101greatgoals.com/feed/", "https://sportslens.com/feed/", "https://deadspin.com/rss" ], "pl": [ "https://weszlo.com/feed/", "https://sportowefakty.wp.pl/rss.xml", "https://futbolnews.pl/feed", "https://igol.pl/feed/" ], "es": [ "https://as.com/rss/tags/ultimas_noticias.xml", "https://e00-marca.uecdn.es/rss/futbol/mas-futbol.xml", "https://www.futbolred.com/rss-news/liga-de-espana.xml", "https://www.futbolya.com/rss/noticias.xml" ], "de": [ "https://www.spox.com/pub/rss/sport-media.xml", "https://www.dfb.de/news/rss/feed/" ] } BOOTSTRAP_SERVERS = ["kafka:9092"] TOPIC = "rss_news" VALIDATOR_CONFIG = { "description_length": 10, "languages": [ "en", "pl", "es", "de" ] }
23.280702
68
0.484454
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from urllib.parse import urlparse from datetime import datetime from airflow import DAG from airflow.operators.python_operator import PythonOperator from dags_config import Config as config from custom_operators import ( ProxyPoolOperator, RSSNewsOperator ) def extract_feed_name(url): parsed_url = urlparse(url) return parsed_url.netloc.replace("www.", "") def dummy_callable(action): return f"{datetime.now()}: {action} scrapping RSS feeds!" def export_events(config, rss_feed, language, dag): feed_name = extract_feed_name(rss_feed) return RSSNewsOperator( task_id=f"exporting_{feed_name}_news_to_broker", validator_config=config.VALIDATOR_CONFIG, rss_feed=rss_feed, language=language, redis_config=config.REDIS_CONFIG, redis_key=config.REDIS_KEY, bootstrap_servers=config.BOOTSTRAP_SERVERS, topic=config.TOPIC, dag=dag ) def create_dag(dag_id, interval, config, language, rss_feeds): with DAG( dag_id=dag_id, description=f"Scrape latest ({language}) sport RSS feeds", schedule_interval=interval, start_date=datetime(2020, 1, 1), catchup=False, is_paused_upon_creation=False ) as dag: start = PythonOperator( task_id="starting_pipeline", python_callable=dummy_callable, op_kwargs={"action": "starting"}, dag=dag ) proxypool = ProxyPoolOperator( task_id="updating_proxypoool", proxy_webpage=config.PROXY_WEBPAGE, number_of_proxies=config.NUMBER_OF_PROXIES, testing_url=config.TESTING_URL, max_workers=config.NUMBER_OF_PROXIES, redis_config=config.REDIS_CONFIG, redis_key=config.REDIS_KEY, dag=dag ) events = [ export_events(config, rss_feed, language, dag) for rss_feed in rss_feeds ] finish = PythonOperator( task_id="finishing_pipeline", python_callable=dummy_callable, op_kwargs={"action": "finishing"}, dag=dag ) start >> proxypool >> events >> finish return dag for n, item in enumerate(config.RSS_FEEDS.items()): language, rss_feeds = item dag_id = f"rss_news_{language}" interval = f"{n*4}-59/10 * * * *" globals()[dag_id] = create_dag( dag_id, interval, config, language, rss_feeds )
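A small worked example (not in the original file) of the schedule strings the module-level loop above produces for the four languages defined in Config.RSS_FEEDS: every DAG runs every ten minutes, with each language offset by four minutes so the scrapes are staggered.

# Reproduces the dag_id/interval pairs generated by the loop above.
for n, language in enumerate(["en", "pl", "es", "de"]):
    print(f"rss_news_{language}: {n * 4}-59/10 * * * *")
# rss_news_en: 0-59/10 * * * *
# rss_news_pl: 4-59/10 * * * *
# rss_news_es: 8-59/10 * * * *
# rss_news_de: 12-59/10 * * * *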
25.861702
66
0.604596
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
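The factory above registers one DAG per language through globals() and staggers the cron schedule by four minutes per language. A small, self-contained sketch of the ids and intervals that loop produces; the feed mapping here is a hypothetical two-language subset of Config.RSS_FEEDS, used for illustration only.

# Illustration only: mirrors the dag_id / schedule_interval naming used by the
# factory loop above, with a hypothetical two-language feed mapping.
rss_feeds_by_language = {
    "en": ["https://www.goal.com/feeds/en/news"],
    "pl": ["https://weszlo.com/feed/"],
}

for n, (language, rss_feeds) in enumerate(rss_feeds_by_language.items()):
    dag_id = f"rss_news_{language}"
    interval = f"{n * 4}-59/10 * * * *"
    # n=0 -> "0-59/10 * * * *", n=1 -> "4-59/10 * * * *": each language is
    # scraped every 10 minutes on a staggered minute offset
    print(dag_id, interval, len(rss_feeds))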
import logging class Logger: __register = False def __init__(self): if not self.__register: self._init_default_register() def _init_default_register(self): logger = logging.getLogger() logger.setLevel(logging.INFO) Logger.__register = True logging.info("Logger initialized") def get_logger(self, filename): return logging.getLogger(filename) def log(cls): cls.logger = Logger().get_logger(cls.__name__) return cls
20.083333
50
0.613861
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
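The log decorator above attaches a logger attribute named after the decorated class. A minimal usage sketch, assuming the same log module layout used by the other components in this repo; the Fetcher class is hypothetical.

from log import log


@log
class Fetcher:
    def run(self):
        # `logger` was attached to the class by the decorator
        self.logger.info("Fetcher started")


Fetcher().run()  # emits "Fetcher started" at INFO level through the root logger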
headers_list = [ { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Referer": "https://www.google.com/", "DNT": "1", "Connection": "keep-alive", "Upgrade-Insecure-Requests": "1" }, { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "Referer": "https://www.google.com/", "DNT": "1", "Connection": "keep-alive", "Upgrade-Insecure-Requests": "1" }, { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Encoding": "gzip, deflate", "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8", "Dnt": "1", "Referer": "https://www.google.com/", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", "X-Amzn-Trace-Id": "Root=1-5ee7bae0-82260c065baf5ad7f0b3a3e3" }, { "User-Agent": 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0', "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7", "Referer": "https://www.reddit.com/", "DNT": "1", "Connection": "keep-alive", "Upgrade-Insecure-Requests": "1" } ]
42.609756
146
0.564633
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import re
import random
from contextlib import closing
from requests import get
from log import log
from parser.random_headers_list import headers_list


@log
class WebParser:
    def __init__(self, website_url, rotate_header=True):
        self.url = website_url
        self._rotate_header = rotate_header

    def get_random_header(self):
        if self._rotate_header:
            return random.choice(headers_list)

    def get_content(self, timeout=30, proxies=None):
        kwargs = {
            "timeout": timeout,
            "proxies": proxies,
            "headers": self.get_random_header()
        }
        try:
            with closing(get(self.url, **kwargs)) as response:
                if self.is_good_response(response):
                    return (
                        response.content
                    )
        except Exception as err:
            self.logger.info(f"Error occurred: {err}")

    @staticmethod
    def is_good_response(response):
        # Use .get() so a missing Content-Type header does not raise,
        # and require a non-empty value instead of a never-failing None check
        content_type = response.headers.get("Content-Type", "").lower()
        return (
            response.status_code == 200
            and bool(content_type)
        )

    def __str__(self):
        domain = re.sub(r"(https?://|www\.)", "", self.url)
        return f"WebParser of {domain.upper()}"
26.934783
63
0.566199
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
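A minimal sketch of fetching a page through WebParser. The URL and proxy values are placeholders; the proxy argument uses the requests-style proxy mapping.

from parser import WebParser  # module path as used by the other components in this repo

parser = WebParser("https://example.com")  # placeholder URL
proxies = {  # placeholder, requests-style proxy mapping
    "http": "http://127.0.0.1:8080",
    "https": "http://127.0.0.1:8080",
}

# Returns the raw response body on a good response, or None when the request
# fails or the response is rejected by is_good_response
content = parser.get_content(timeout=10, proxies=proxies)
if content is None:
    print("fetch failed")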
from bs4 import BeautifulSoup from dataclasses import dataclass, field from parser import WebParser from log import log @dataclass class ProxyRecord: ip_address: str port: int country_code: str country: str anonymity: str google: str https: str last_checked: str proxy: dict = field(init=False, default=None) def __post_init__(self): self.proxy = self.format_proxy() def format_proxy(self): protocol = "https" if self.https == "yes" else "http" url = f"{protocol}://{self.ip_address}:{self.port}" return {"http": url, "https": url} @log class ProxyPoolScraper: def __init__(self, url, bs_parser="lxml"): self.parser = WebParser(url) self.bs_parser = bs_parser def get_proxy_stream(self, limit): raw_records = self.extract_table_raw_records() clean_records = list( map(self._clear_up_record, raw_records) ) for record in clean_records[:limit]: self.logger.info(f"Proxy record: {record}") if record: yield ProxyRecord(*record) def extract_table_raw_records(self): content = self.parser.get_content() soup_object = BeautifulSoup(content, self.bs_parser) return ( soup_object .find(id="list") .find_all("tr") ) def _clear_up_record(self, raw_record): return [ val.text for val in raw_record.find_all("td") ]
25.103448
61
0.58427
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import time from dataclasses import dataclass from parser import WebParser from log import log @dataclass(frozen=True) class ProxyStatus: proxy: str health: float is_valid: bool @log class ProxyPoolValidator: def __init__(self, url, timeout=10, checks=3, sleep_interval=0.1): self.timeout = timeout self.checks = checks self.sleep_interval = sleep_interval self.parser = WebParser(url, rotate_header=True) def validate_proxy(self, proxy_record): consecutive_checks = [] for _ in range(self.checks): content = self.parser.get_content( timeout=self.timeout, proxies=proxy_record.proxy ) time.sleep(self.sleep_interval) consecutive_checks.append(int(content is not None)) health = sum(consecutive_checks) / self.checks proxy_status = ProxyStatus( proxy=proxy_record.proxy, health=health, is_valid=health > 0.66 ) self.logger.info(f"Proxy status: {proxy_status}") return proxy_status
26.775
70
0.616216
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import json import redis from log import log @log class RedisProxyPoolClient: def __init__(self, key, redis_config): self.key = key self.redis = redis.StrictRedis( **redis_config ) def __enter__(self): return self def override_existing_proxies(self, proxies): self.logger.info(f"Overriding existing proxies {proxies}") self.redis.delete(self.key) self.redis.lpush(self.key, *proxies) def list_existing_proxies(self): response = self.redis.lrange(self.key, 0, -1) return [ json.loads(proxy) for proxy in response ] def get_proxy(self): existing_proxies = self.list_existing_proxies() if len(existing_proxies) > 0: return existing_proxies[0] def lpop_proxy(self): self.logger.info("Deleting proxy!") self.redis.lpop(self.key) def __exit__(self, type, value, traceback): client_id = self.redis.client_id() self.redis.client_kill_filter( _id=client_id )
24.5
66
0.594393
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
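The scraper, validator and Redis client above cover the whole proxy-refresh cycle. The ProxyPoolOperator referenced in the DAG is not shown in this section, so the sketch below is only a plausible, sequential reconstruction of that cycle using the Config values defined earlier; the real operator presumably validates proxies concurrently.

import json
from dags_config import Config as config
from proxypool import (
    ProxyPoolScraper,
    ProxyPoolValidator,
    RedisProxyPoolClient
)

scraper = ProxyPoolScraper(config.PROXY_WEBPAGE)
validator = ProxyPoolValidator(config.TESTING_URL, checks=1)

# Keep only proxies that answered the health check
valid_proxies = []
for record in scraper.get_proxy_stream(config.NUMBER_OF_PROXIES):
    status = validator.validate_proxy(record)
    if status.is_valid:
        valid_proxies.append(status.proxy)

# Proxies are stored as JSON strings, matching list_existing_proxies()
with RedisProxyPoolClient(config.REDIS_KEY, config.REDIS_CONFIG) as client:
    if valid_proxies:
        client.override_existing_proxies(
            [json.dumps(proxy) for proxy in valid_proxies]
        )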
import functools
from log import log


@log
class RetryOnException:
    def __init__(self, retries):
        self._retries = retries

    def __call__(self, function):
        functools.update_wrapper(self, function)

        def wrapper(*args, **kwargs):
            self.logger.info(f"Retries: {self._retries}")
            while self._retries != 0:
                try:
                    # Success ends the loop immediately
                    return function(*args, **kwargs)
                except Exception as err:
                    self.logger.info(f"Error occurred: {err}")
                    self._retries -= 1
                    self._raise_on_condition(self._retries, err)
        return wrapper

    def _raise_on_condition(self, retries, exception):
        if retries == 0:
            raise exception
        else:
            self.logger.info(f"Retries: {retries}")
28
64
0.527043
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
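A minimal sketch of the retry decorator on a deliberately flaky function; the function itself is hypothetical. Note that the retry budget lives on the decorator instance, so it is shared across calls to the same decorated callable.

from retry import RetryOnException as retry  # same import style as the operator above

attempts = {"count": 0}


@retry(3)
def flaky():
    # Fails twice, then succeeds; each failure is logged and retried
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise ConnectionError("temporary failure")
    return "ok"


print(flaky())  # -> "ok" after two retried failures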
import json import time from kafka import KafkaProducer class NewsExporter: def __init__(self, bootstrap_servers): self._producer = self._connect_producer( bootstrap_servers ) def _connect_producer(self, bootstrap_servers): def encode_news(value): return json.dumps(value).encode("utf-8") producer = KafkaProducer( bootstrap_servers=bootstrap_servers, value_serializer=lambda x: encode_news(x) ) return producer def __enter__(self): return self def export_news_to_broker(self, topic, record, sleep_time=0.01): response = self._producer.send( topic, value=record ) time.sleep(sleep_time) return response.get( timeout=60 ) def __exit__(self, type, value, traceback): self._producer.close()
23.432432
68
0.581395
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
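A minimal sketch of publishing a single record with NewsExporter. The broker address and topic are placeholders, and a reachable Kafka broker is required for the send to succeed.

from rss_news import NewsExporter

record = {
    "_id": "example-1",
    "title": "Example title",
    "link": "https://example.com/article",
    "published": "2020-01-01 00:00:00",
    "description": "Example description.",
    "author": "Unknown",
    "language": "en"
}

# The context manager closes the underlying KafkaProducer on exit
with NewsExporter(["kafka:9092"]) as exporter:  # placeholder bootstrap server
    exporter.export_news_to_broker("rss_news", record)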
import re from dataclasses import dataclass import atoma from parser import WebParser @dataclass(frozen=True) class News: _id: str title: str link: str published: str description: str author: str language: str def as_dict(self): return self.__dict__ class NewsProducer: def __init__(self, rss_feed, language): self.parser = WebParser(rss_feed, rotate_header=True) self.formatter = NewsFormatter(language) def _extract_news_feed_items(self, proxies): content = self.parser.get_content(proxies=proxies) news_feed = atoma.parse_rss_bytes(content) return news_feed.items def get_news_stream(self, proxies): news_feed_items = self._extract_news_feed_items(proxies) for entry in news_feed_items: formatted_entry = self.formatter.format_entry(entry) yield formatted_entry class NewsFormatter: def __init__(self, language): self.language = language self.date_format = "%Y-%m-%d %H:%M:%S" self.id_regex = "[^0-9a-zA-Z_-]+" self.default_author = "Unknown" def format_entry(self, entry): description = self.format_description(entry) return News( self.construct_id(entry.title), entry.title, entry.link, self.unify_date(entry.pub_date), description, self.assign_author(entry.author), self.language ) def construct_id(self, title): return re.sub(self.id_regex, "", title).lower() def unify_date(self, date): return date.strftime(self.date_format) def assign_author(self, author): return self.default_author if not author else author def format_description(self, entry): tmp_description = re.sub("<.*?>", "", entry.description[:1000]) index = tmp_description.rfind(".") short_description = tmp_description[:index+1] return ( short_description if short_description else entry.title )
26.945946
71
0.610547
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
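A minimal sketch of streaming formatted entries from one feed with NewsProducer. The feed URL is taken from Config.RSS_FEEDS, network access is required, and proxies=None fetches directly instead of going through the proxy pool.

from rss_news import NewsProducer

producer = NewsProducer("https://www.goal.com/feeds/en/news", "en")

# Each yielded item is a News dataclass ready for validation and export
for news in producer.get_news_stream(proxies=None):
    print(news._id, news.title)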
class NewsValidator: def __init__(self, config): self._config = config def validate_news(self, news): news = news.as_dict() assert self.check_languages(news), "Wrong language!" assert self.check_null_values(news), "Null values!" assert self.check_description_length(news), "Short description!" def check_null_values(self, news): news_values = list(news.values()) return all(news_values) def check_description_length(self, news): description_length = self._config.get("description_length") return len(news.get("description")) >= description_length def check_languages(self, news): languages = self._config.get("languages") lang = news.get("language") return any( filter(lambda x: x == lang, languages) )
30.333333
72
0.620118
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
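Validation is assertion-based, so a failing check raises AssertionError; the checks run in the order language, null values, description length. A small sketch with placeholder records:

from rss_news import News, NewsValidator

validator = NewsValidator({"description_length": 10, "languages": ["en", "pl"]})

good = News("id1", "Title", "https://example.com", "2020-01-01 00:00:00",
            "A description long enough to pass.", "Unknown", "en")
validator.validate_news(good)  # passes silently

bad = News("id2", "Title", "https://example.com", "2020-01-01 00:00:00",
           "Too short", "Unknown", "fr")
try:
    validator.validate_news(bad)
except AssertionError as err:
    print(f"rejected: {err}")  # the language check fails first: "Wrong language!"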
from pkg_resources import resource_string import pytest import fakeredis from parser import WebParser from requests import Response from rss_news import NewsProducer, NewsFormatter, NewsValidator, News from proxypool import ProxyPoolScraper, ProxyRecord from retry import RetryOnException as retry TEST_URL = "https://test.com" @pytest.fixture def web_parser(): yield WebParser(TEST_URL) @pytest.fixture def scraper(): yield ProxyPoolScraper(TEST_URL) @pytest.fixture def proxies(): yield [ { "http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080" } ] @pytest.fixture def proxy_record(): yield ProxyRecord( "127.0.0.1", 8080, "PL", "POLAND", "gold", "no", "no", "30 minutes ago" ) @pytest.fixture def producer(): yield NewsProducer(TEST_URL, "en") @pytest.fixture def formatter(): yield NewsFormatter("en") @pytest.fixture def validator(): yield NewsValidator( { "description_length": 10, "languages": ["en"] } ) @pytest.fixture def news_record(): yield News( "test_id", "test_title", "test_link", "test_pub", "test_desc", "test_author", "en" ) @pytest.fixture def redis_mock(): yield fakeredis.FakeStrictRedis() @pytest.fixture def redis_config(): yield { "host": "redis", "port": "6379", "db": 0 } @pytest.fixture def response(): def helper(status_code): response = Response() response.status_code = status_code response.headers['Content-Type'] = "text/html" return response yield helper @pytest.fixture def raw_content(): def helper(filename): return resource_string( "tests", f"dataresources/{filename}" ) yield helper @pytest.fixture def add_function(): @retry(5) def func(a, b): return a + b yield func
15.708333
69
0.590818
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from proxypool import ProxyRecord from unittest.mock import patch from ..fixtures import web_parser, scraper, raw_content @patch("parser.web_parser.WebParser.get_content") def test_get_proxy_stream(get_content, raw_content, web_parser, scraper): get_content.return_value = raw_content("proxy_list_file.txt") scraper.parser = web_parser stream = scraper.get_proxy_stream(5) result = list(stream)[-1] assert isinstance(result, ProxyRecord)
26.352941
73
0.741379
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from unittest.mock import patch from proxypool import ProxyPoolValidator from ..fixtures import web_parser, raw_content, proxy_record @patch("parser.web_parser.WebParser.get_content") def test_validate_proxy(get_content, raw_content, web_parser, proxy_record): expected = True get_content.return_value = raw_content("proxy_list_file.txt") validator = ProxyPoolValidator("https://google.com", sleep_interval=0) validator.parser = web_parser proxy_record = validator.validate_proxy(proxy_record) result = proxy_record.is_valid assert result == expected @patch("parser.web_parser.WebParser.get_content") def test_invalid_proxy(get_content, raw_content, web_parser, proxy_record): expected = False get_content.return_value = None validator = ProxyPoolValidator("https://google.com", sleep_interval=0) validator.parser = web_parser proxy_record = validator.validate_proxy(proxy_record) result = proxy_record.is_valid assert result == expected @patch("parser.web_parser.WebParser.get_content") def test_unstable_valid_proxy(get_content, raw_content, web_parser, proxy_record): expected = True valid_content = raw_content("proxy_list_file.txt") get_content.side_effect = [valid_content, valid_content, None] validator = ProxyPoolValidator("https://google.com", sleep_interval=0) validator.parser = web_parser proxy_record = validator.validate_proxy(proxy_record) result = proxy_record.is_valid assert result == expected assert round(proxy_record.health, 2) == 0.67 @patch("parser.web_parser.WebParser.get_content") def test_unstable_invalid_proxy(get_content, raw_content, web_parser, proxy_record): expected = False valid_content = raw_content("proxy_list_file.txt") get_content.side_effect = [None, None, valid_content] validator = ProxyPoolValidator("https://google.com", sleep_interval=0) validator.parser = web_parser proxy_record = validator.validate_proxy(proxy_record) result = proxy_record.is_valid assert result == expected assert round(proxy_record.health, 2) == 0.33
33.177419
84
0.725685
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import json
from unittest.mock import patch
from proxypool import RedisProxyPoolClient
from ..fixtures import redis_config, redis_mock, proxies


@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_override_existing_proxies(redis, redis_config, redis_mock, proxies):
    new_proxies = [{"http": "http://127.0.0.1:8081", "https": "http://127.0.0.1:8081"}]
    key = "test"
    redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])

    redis_client = RedisProxyPoolClient(key, redis_config)
    redis_client.redis = redis_mock

    redis_client.override_existing_proxies(
        [json.dumps(_) for _ in new_proxies]
    )
    current_proxies = redis_mock.lrange(key, 0, -1)
    result = [json.loads(_) for _ in current_proxies]

    assert result != proxies


@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_list_existing_proxies(redis, redis_config, redis_mock, proxies):
    key = "test"
    redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])

    redis_client = RedisProxyPoolClient(key, redis_config)
    redis_client.redis = redis_mock

    result = redis_client.list_existing_proxies()

    assert result == proxies


@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_lpop_proxy(redis, redis_config, redis_mock, proxies):
    # After popping one proxy the list stored in Redis should shrink by one
    expected = len(proxies) - 1
    key = "test"
    redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])

    redis_client = RedisProxyPoolClient(key, redis_config)
    redis_client.redis = redis_mock

    redis_client.lpop_proxy()

    assert redis_mock.llen(key) == expected


@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_get_proxy(redis, redis_config, redis_mock, proxies):
    expected = proxies[0]
    key = "test"
    redis_mock.lpush(key, *[json.dumps(_) for _ in proxies])

    redis_client = RedisProxyPoolClient(key, redis_config)
    redis_client.redis = redis_mock

    result = redis_client.get_proxy()

    assert result == expected


@patch("proxypool.redis_proxypool_client.redis.StrictRedis")
def test_redis_client_context_manager(redis, redis_config):
    key = "test"

    with RedisProxyPoolClient(key, redis_config) as redis_client:
        assert isinstance(redis_client, RedisProxyPoolClient)

    redis.assert_called_once_with(**redis_config)
28.486486
87
0.695552
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import pytest from ..fixtures import add_function def test_retry_on_exception_valid(add_function): expected = 2 result = add_function(1, 1) assert result == expected def test_retry_on_exception_wrong(add_function): with pytest.raises(TypeError): add_function("Test", 0.0001)
16.111111
48
0.690554
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from unittest.mock import patch, Mock import pytest from rss_news import NewsExporter @patch("rss_news.rss_news_exporter.KafkaProducer") def test_connect_producer(mock_producer): exporter = NewsExporter(["test_broker:9092"]) assert exporter._producer is not None @patch("rss_news.NewsExporter") def test_export_news_to_broker(exporter): topic = "test_topic" news = { "_id": "test_id", "title": "test_title", "link": "www.test.com", "date": "2020-01-01 00:00:00", "description": "Test", "author": "Test", "language": "pl" } exporter.export_news_to_broker(topic, news) exporter.export_news_to_broker.assert_called_once_with( topic, news ) @patch("rss_news.rss_news_exporter.KafkaProducer") def test_export_news_to_broker_context_manager(mock_producer): topic = "test_topic" news = { "_id": "test_id", "title": "test_title", "link": "www.test.com", "date": "2020-01-01 00:00:00", "description": "Test", "author": "Test", "language": "pl" } with NewsExporter(["test_broker:9092"]) as exporter: exporter.export_news_to_broker(topic, news) exporter._producer.send.assert_called_once_with( topic, value=news )
24.843137
62
0.601367
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import datetime from unittest.mock import patch import pytest from rss_news import News from ..fixtures import web_parser, raw_content, producer, proxies, formatter @patch("parser.web_parser.WebParser.get_content") def test_get_news_stream(get_content, web_parser, raw_content, producer, proxies): get_content.return_value = raw_content("rss_news_file.txt") producer.parser = web_parser stream = producer.get_news_stream(proxies) result = list(stream)[-1] assert isinstance(result, News) @pytest.mark.parametrize( "title, expected_id", [ ("example////1 example", "example1example"), ("example%%%%%%%2 example", "example2example"), ("*******example-3_ xx example", "example-3_xxexample")] ) def test_construct_id(formatter, title, expected_id): result = formatter.construct_id(title) assert result == expected_id def test_unify_date(formatter): expected = "2020-05-17 00:00:00" date = datetime.datetime(2020, 5, 17) result = formatter.unify_date(date) assert result == expected def test_format_description(formatter): expected = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.""" class Entry: description = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation""" title = "Lorem ipsum" class EmptyEntry: description = "" title = "Lorem ipsum" result = formatter.format_description(Entry) result_empty = formatter.format_description(EmptyEntry) assert result == expected assert result_empty == EmptyEntry.title @pytest.mark.parametrize( "author, expected",[(None, "Unknown"), ("Test", "Test")] ) def test_assing_author(formatter, author, expected): result = formatter.assign_author(author) assert result == expected
26.657534
82
0.68335
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
import pytest
from rss_news import News
from ..fixtures import validator, news_record


def test_check_null_values(validator, news_record):
    expected = True
    news = news_record.as_dict()

    result = validator.check_null_values(news)

    assert result is expected


def test_check_null_values_with_nones(validator, news_record):
    expected = False
    news = news_record.as_dict()
    # Null out the dataclass field that actually exists ("_id", not "id")
    news["_id"] = None

    result = validator.check_null_values(news)

    assert result is expected


def test_check_languages(validator, news_record):
    expected = True
    news = news_record.as_dict()

    result = validator.check_languages(news)

    assert result is expected


def test_validate_news_raises_error(validator, news_record):
    with pytest.raises(AssertionError):
        validator.validate_news(news_record)
19.095238
62
0.69395
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from unittest.mock import patch import pytest from pytest import fixture from requests.exceptions import ConnectionError from parser import WebParser from ..fixtures import web_parser, response @patch("parser.web_parser.get") def test_get_content(mock_get, web_parser): expected = "TEST CONTENT" mock_get.return_value.content = "TEST CONTENT" mock_get.return_value.status_code = 200 mock_get.return_value.headers['Content-Type'] = "text/html" result = web_parser.get_content() assert result == expected @patch("parser.web_parser.get") def test_get_content_silence_exception(mock_get, web_parser): expected = None mock_get.side_effect = ConnectionError() result = web_parser.get_content() assert result == expected @pytest.mark.parametrize( "status_code, expected", [(200, True), (403, False), (500, False)] ) def test_is_good_response(web_parser, response, status_code, expected): http_response = response(status_code) result = web_parser.is_good_response(http_response) assert result == expected def test_get_random_header(web_parser): expected = "User-Agent" random_header = web_parser.get_random_header() result = list(random_header.keys()) assert expected in result @pytest.mark.parametrize( "url, expected", [ ("https://test.com", "WebParser of TEST.COM"), ("https://www.test.com", "WebParser of TEST.COM"), ("www.test.com", "WebParser of TEST.COM") ] ) def test__str__representation(url, expected): web_parser = WebParser(url) result = str(web_parser) assert result == expected
22.985507
71
0.674123
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
""" Django settings for core project. Generated by 'django-admin startproject' using Django 2.2. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.getenv('DJANGO_SECRET', 'default_secret_key') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(",") # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'rest_framework.authtoken', 'rest_framework_swagger', 'django_elasticsearch_dsl', 'django_elasticsearch_dsl_drf', 'users', 'news', 'search' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'core.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'core.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'djongo', 'ENFORCE_SCHEMA': True, 'LOGGING': { 'version': 1, 'loggers': { 'djongo': { 'level': 'DEBUG', 'propogate': False, } }, }, 'NAME': 'rss_news', 'CLIENT': { 'host': os.environ['MONGO_HOST'], 'port': 27017, 'username': os.environ['MONGO_USR'], 'password': os.environ['MONGO_PASSWD'], 'authSource': 'admin' } } } ELASTICSEARCH_DSL = { 'default': { 'hosts': f"{os.environ['ELASTIC_HOST']}:9200" }, } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/') REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), 'DEFAULT_PAGINATION_CLASS': 
'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10, 'DEFAULT_PARSER_CLASSES': [ 'rest_framework.parsers.FormParser', 'rest_framework.parsers.MultiPartParser', 'rest_framework.parsers.JSONParser', ] } SWAGGER_SETTINGS = { 'SECURITY_DEFINITIONS': { 'api_key': { 'type': 'apiKey', 'in': 'header', 'name': 'Authorization' }, 'is_authenticated': True, }, } LOGIN_URL='user/register/' AUTH_USER_MODEL = 'users.UserModel'
24.912088
91
0.627784
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
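The settings module above pulls its secrets and service hosts from the environment: ALLOWED_HOSTS, MONGO_HOST, MONGO_USR, MONGO_PASSWD and ELASTIC_HOST are required, while DJANGO_SECRET has a default. A hedged sketch of bootstrapping Django locally with placeholder values; in the actual deployment these would come from the container environment rather than from code.

import os

# Placeholder values for local experimentation only
os.environ.setdefault("DJANGO_SECRET", "dev-secret")
os.environ.setdefault("ALLOWED_HOSTS", "localhost,127.0.0.1")
os.environ.setdefault("MONGO_HOST", "mongo")
os.environ.setdefault("MONGO_USR", "root")
os.environ.setdefault("MONGO_PASSWD", "example")
os.environ.setdefault("ELASTIC_HOST", "elasticsearch")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")

import django  # noqa: E402

django.setup()  # loads core.settings with the variables above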
from django.urls import path from users.views import UserCreateView, ObtainTokenView app_name = "user" urlpatterns = [ path("register/", UserCreateView.as_view(), name="register"), path("login/", ObtainTokenView.as_view(), name="login") ]
21.818182
65
0.712
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
""" WSGI config for core project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings') application = get_wsgi_application()
21.705882
78
0.766234
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
27.409091
73
0.655449
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from rest_framework import routers from django.contrib import admin from rest_framework.authtoken.models import Token from news.models import News from news.urls import urlpatterns from users.models import UserModel class NewsAdminSite(admin.AdminSite): def get_urls(self): urls = super(NewsAdminSite, self).get_urls() custom_urls = [*urlpatterns] return custom_urls + urls admin_site = NewsAdminSite() admin_site.register(UserModel) admin_site.register(Token) admin_site.register(News)
24
52
0.753817
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from django.apps import AppConfig class UsersConfig(AppConfig): name = 'users'
13.333333
33
0.741176
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
from django.db import models from django.contrib.auth.models import ( AbstractBaseUser, BaseUserManager, PermissionsMixin ) class UserManager(BaseUserManager): def create_user(self, email, password=None, **kwargs): if not email: raise ValueError("Must have an email address") email = self.normalize_email(email) user = self.model(email=email, **kwargs) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, password): user = self.create_user(email, password) user.is_staff = True user.is_superuser = True user.save(using=self._db) return user class UserModel(AbstractBaseUser, PermissionsMixin): email = models.EmailField(max_length=255, unique=True) name = models.CharField(max_length=255) is_active = models.BooleanField(default=True) is_staff = models.BooleanField(default=False) objects = UserManager() USERNAME_FIELD = "email"
27.027027
58
0.666023
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
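A minimal usage sketch of the custom user manager; credentials are placeholders and it assumes migrations have been applied in a configured Django context.

from users.models import UserModel

# create_user hashes the password via set_password before saving
user = UserModel.objects.create_user(
    email="reader@example.com",
    password="placeholder-pass",
    name="Reader"
)

# create_superuser reuses create_user and flips the staff/superuser flags
admin = UserModel.objects.create_superuser(
    email="admin@example.com",
    password="placeholder-pass"
)

print(user.check_password("placeholder-pass"))  # True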
from django.contrib.auth import authenticate
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from users.models import UserModel


class UserSerializer(serializers.ModelSerializer):

    class Meta:
        model = UserModel
        fields = ("email", "password")
        extra_kwargs = {
            "password": {
                "write_only": True,
                "min_length": 5
            }
        }

    def create(self, validated_data):
        user = UserModel(
            email=validated_data["email"]
        )
        user.set_password(validated_data["password"])
        user.save()
        return user


class AuthTokenSerializer(serializers.Serializer):
    email = serializers.CharField()
    password = serializers.CharField(
        style={
            "input_type": "password"
        },
        trim_whitespace=False
    )

    def validate(self, attrs):
        email = attrs.get("email")
        password = attrs.get("password")
        user = authenticate(
            request=self.context.get("request"),
            email=email,
            password=password
        )
        if not user:
            raise serializers.ValidationError(
                "Unable to authenticate with provided credentials",
                code="authentication"
            )
        attrs["user"] = user
        return attrs
23.614035
67
0.560628
DataEngineeringProject
https://github.com/damklis/DataEngineeringProject
Example end to end data engineering project.
920
192
2023-12-04 19:31:15+00:00
2020-06-30 09:33:56+00:00
1,845
MIT License
Python
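A minimal sketch of the two serializers as the register/login views would use them. The payloads are placeholders, and the users.serializers module path is an assumption, since the views themselves are not shown in this section.

from users.serializers import UserSerializer, AuthTokenSerializer  # assumed module path

# Registration: validates the payload and stores a hashed password
serializer = UserSerializer(data={"email": "reader@example.com", "password": "longenough"})
serializer.is_valid(raise_exception=True)
user = serializer.save()

# Login: authenticate() resolves the user, which the view exchanges for a token
token_serializer = AuthTokenSerializer(
    data={"email": "reader@example.com", "password": "longenough"},
    context={"request": None}
)
token_serializer.is_valid(raise_exception=True)
authenticated_user = token_serializer.validated_data["user"]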
from django.test import TestCase from django.urls.resolvers import URLResolver from news.admin import NewsAdminSite class TestAdminSite(TestCase): def setUp(self): self.admin_site = NewsAdminSite() def test_api_urls_in_admin_site(self): expected = "'api/'" urls_objects = self.admin_site.get_urls() urls = list( filter(lambda x: isinstance(x, URLResolver), urls_objects) ) result = urls[0].pattern.describe() self.assertEqual(result, expected)
22.347826
70
0.641791