{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from awsglue.transforms import *\n",
    "from awsglue.utils import getResolvedOptions\n",
    "from pyspark.context import SparkContext\n",
    "from awsglue.context import GlueContext\n",
    "from awsglue.job import Job\n",
    "\n",
    "## @params: [JOB_NAME]\n",
    "## BUG FIX: Glue always passes the reserved --JOB_NAME argument to the script,\n",
    "## so the lookup key must be 'JOB_NAME', not the job's display name.\n",
    "args = getResolvedOptions(sys.argv, ['JOB_NAME'])\n",
    "\n",
    "sc = SparkContext()\n",
    "glueContext = GlueContext(sc)\n",
    "spark = glueContext.spark_session\n",
    "job = Job(glueContext)\n",
    "job.init(args['JOB_NAME'], args)\n",
    "\n",
    "## Set up the data source from the Glue Data Catalog and name its\n",
    "## transformation context (used for job bookmarks).\n",
    "merged_raw_datasource = glueContext.create_dynamic_frame.from_catalog(database = \"db_tlc_etl_green_car\", table_name = \"green\", transformation_ctx = \"merged_raw_datasource\")\n",
    "\n",
    "## Map the merged raw schema: keep the listed fields, drop everything else,\n",
    "## and cast the two datetime strings to timestamps:\n",
    "##   lpep_pickup_datetime   string -> timestamp\n",
    "##   lpep_dropoff_datetime  string -> timestamp\n",
    "datasource_with_mappingshema = ApplyMapping.apply(frame = merged_raw_datasource, mappings = [ \\\n",
    "    (\"vendorid\", \"long\", \"vendorid\", \"long\"), \n",
    "    (\"lpep_pickup_datetime\", \"string\", \"lpep_pickup_datetime\", \"timestamp\"), (\"lpep_dropoff_datetime\", \"string\", \"lpep_dropoff_datetime\", \"timestamp\"), \n",
    "    (\"ratecodeid\", \"long\", \"ratecodeid\", \"long\"), \n",
    "    (\"pulocationid\", \"long\", \"pulocationid\", \"long\"), (\"dolocationid\", \"long\", \"dolocationid\", \"long\"), \n",
    "    (\"passenger_count\", \"long\", \"passenger_count\", \"long\"), (\"trip_distance\", \"double\", \"trip_distance\", \"double\"), \n",
    "    (\"fare_amount\", \"double\", \"fare_amount\", \"double\"), \n",
    "    (\"extra\", \"double\", \"extra\", \"double\"), (\"mta_tax\", \"double\", \"mta_tax\", \"double\"), \n",
    "    (\"tip_amount\", \"double\", \"tip_amount\", \"double\"), (\"tolls_amount\", \"double\", \"tolls_amount\", \"double\"), \n",
    "    (\"improvement_surcharge\", \"double\", \"improvement_surcharge\", \"double\"), \n",
    "    (\"total_amount\", \"double\", \"total_amount\", \"double\"), (\"payment_type\", \"long\", \"payment_type\", \"long\"), \n",
    "    (\"trip_type\", \"long\", \"trip_type\", \"long\")], transformation_ctx = \"datasource_with_mappingshema\")\n",
    "\n",
    "## Polish the mapped source.\n",
    "## step 1: resolve ambiguous (choice) column types left by the crawler.\n",
    "## <make_struct> keeps both candidate types side by side in a struct column\n",
    "## instead of picking one.\n",
    "dataframe_with_resolvechoice_appliyed = ResolveChoice.apply(frame = datasource_with_mappingshema, choice = \"make_struct\", transformation_ctx = \"dataframe_with_resolvechoice_appliyed\")\n",
    "\n",
    "## step 2: drop null and NaN fields\n",
    "## @inputs: [frame = dataframe_with_resolvechoice_appliyed]\n",
    "dataframe_without_nullfields = DropNullFields.apply(frame = dataframe_with_resolvechoice_appliyed, transformation_ctx = \"dataframe_without_nullfields\")\n",
    "\n",
    "'''\n",
    "## step 3 (disabled): drop some other fields and add a new column.\n",
    "## NOTE(review): if re-enabled, this block also needs these imports at the top:\n",
    "##   from awsglue.dynamicframe import DynamicFrame\n",
    "##   from pyspark.sql import functions as f\n",
    "##   from pyspark.sql.functions import unix_timestamp\n",
    "\n",
    "custom_sparkDF = dataframe_without_nullfields.toDF()\n",
    "\n",
    "# add a new column for the trip duration in seconds\n",
    "custom_sparkDF = custom_sparkDF.withColumn('time_dration',unix_timestamp(f.col('lpep_dropoff_datetime'))- unix_timestamp(f.col('lpep_pickup_datetime')))\n",
    "\n",
    "# Convert back to a Glue DynamicFrame for further processing.\n",
    "# BUG FIX: the original referenced an undefined name `customDF`.\n",
    "dataframe_without_nullfields = DynamicFrame.fromDF(custom_sparkDF, glueContext, \"custom_sparkDF\")\n",
    "\n",
    "## drop three more fields\n",
    "## dataframe_without_nullfields = dataframe_without_nullfields.drop_fields(['improvement_surcharge','tolls_amount','mta_tax'])\n",
    "'''\n",
    "\n",
    "## Write the target data to S3 as parquet.\n",
    "datasink = glueContext.write_dynamic_frame.from_options(frame = dataframe_without_nullfields, \n",
    "                                                         connection_type = \"s3\", \n",
    "                                                         connection_options = {\"path\": \"s3://data-etl-o-target-0/result\"}, \n",
    "                                                         format = \"parquet\", \n",
    "                                                         transformation_ctx = \"datasink\")\n",
    "## Commit the job (flushes job-bookmark state) and finish.\n",
    "job.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# result:\n",
    "# waiting for the job to finish, which will produce the target files:\n",
    "#   s3://data-etl-o-target-0/result/*.parquet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "## step 3 (disabled): drop some other fields and add a new column.\n",
    "## NOTE(review): duplicate of the disabled block in the first cell; kept here\n",
    "## as a scratch copy. If re-enabled, it also needs these imports:\n",
    "##   from awsglue.dynamicframe import DynamicFrame\n",
    "##   from pyspark.sql import functions as f\n",
    "##   from pyspark.sql.functions import unix_timestamp\n",
    "\n",
    "custom_sparkDF = dataframe_without_nullfields.toDF()\n",
    "\n",
    "# add a new column for the trip duration in seconds\n",
    "custom_sparkDF = custom_sparkDF.withColumn('time_dration',unix_timestamp(f.col('lpep_dropoff_datetime'))- unix_timestamp(f.col('lpep_pickup_datetime')))\n",
    "\n",
    "# Convert back to a Glue DynamicFrame for further processing.\n",
    "# BUG FIX: the original referenced an undefined name `customDF`.\n",
    "dataframe_without_nullfields = DynamicFrame.fromDF(custom_sparkDF, glueContext, \"custom_sparkDF\")\n",
    "\n",
    "## drop three more fields\n",
    "## dataframe_without_nullfields = dataframe_without_nullfields.drop_fields(['improvement_surcharge','tolls_amount','mta_tax'])\n",
    "'''"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
