# -*- coding:utf-8 -*-
"""
pyspark_etl_demo



"""
import findspark
findspark.init()

from pyspark.sql import SparkSession
from pyspark.sql.functions import col

class Extractor:
    """Thin wrapper around a Hive-enabled SparkSession that supplies the
    extract / transform / load steps used by :class:`ETLProcess`.

    NOTE(review): despite the name, this class also transforms and loads;
    consider renaming (kept as-is to preserve the public interface).
    """

    def __init__(self):
        # getOrCreate() reuses an active session if one exists; Hive
        # support is required for saveAsTable() in to_hive().
        self.spark = (
            SparkSession.builder
            .appName("ETL Process")
            .enableHiveSupport()
            .getOrCreate()
        )

    def read_csv(self, filepath):
        """Read a header-bearing CSV file into a DataFrame, inferring column types."""
        return self.spark.read.csv(filepath, header=True, inferSchema=True)

    def to_hive(self, data, tableName):
        """Persist *data* as the Hive table *tableName*, replacing any existing table."""
        # BUG FIX: save mode was misspelled "overweite", which raises an
        # IllegalArgumentException at write time; the valid mode is "overwrite".
        data.write.mode("overwrite").saveAsTable(tableName)

    def clean_data(self, data):
        """Drop every row containing at least one null value."""
        return data.na.drop()

    def transformData(self, data):
        """Cast the "amount" column to float so downstream arithmetic is numeric."""
        return data.withColumn("amount", col("amount").cast("float"))


class ETLProcess:
    """Drive the read -> clean -> transform -> load pipeline for one CSV file."""

    def __init__(self, filepath, tablename):
        self.worker = Extractor()
        self.filepath = filepath
        self.tablename = tablename

    def run(self):
        """Execute each pipeline stage in order and write the result to Hive."""
        raw = self.worker.read_csv(self.filepath)
        cleaned = self.worker.clean_data(raw)
        transformed = self.worker.transformData(cleaned)
        self.worker.to_hive(transformed, self.tablename)

if __name__ == '__main__':
    filepath = "data/data.csv"
    # BUG FIX: the original called Extractor(filepath, "u_test"), but
    # Extractor.__init__ takes no arguments (TypeError); the pipeline
    # entry point is ETLProcess.  The SparkSession also lives on
    # process.worker.spark, not process.spark.
    process = ETLProcess(filepath, "u_test")
    try:
        process.run()
    finally:
        # Always release the Spark session, even if the pipeline fails.
        process.worker.spark.stop()