import boto3
from boto3 import Session

import os, json, pyspark, time
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import explode, udf, mean, max, min, input_file_name
from pyspark.sql.types import FloatType

# Tear down any SparkContext left over from a previous (e.g. notebook) session
# before building a fresh one. 'sc' does not exist when this file is run as a
# plain script, so guard the stop() — the bare sc.stop() raised NameError there.
try:
    sc.stop()
except NameError:
    pass

conf = SparkConf().setMaster("local").setAppName("My App")
sc = SparkContext(conf=conf)
# Signature V4 is required for newer AWS regions (e.g. us-east-2 used below).
sc.setSystemProperty("com.amazonaws.services.s3.enableV4", "true")

# SECURITY: AWS credentials were previously hard-coded here. Any key pair that
# was ever committed to source control must be treated as compromised and
# rotated in IAM. Read credentials from the environment instead; with empty
# values the S3A connector can still fall back to its default provider chain.
access_id = os.environ.get("AWS_ACCESS_KEY_ID", "")
access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")

# Configure the Hadoop S3A filesystem used by s3a:// reads/writes below.
hadoop_conf = sc._jsc.hadoopConfiguration()
hadoop_conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
hadoop_conf.set("com.amazonaws.services.s3.enableV4", "true")
hadoop_conf.set("fs.s3a.access.key", access_id)
hadoop_conf.set("fs.s3a.secret.key", access_key)
hadoop_conf.set("fs.s3a.path.style.access", "true")
# Large pool: many small part-files are read/written concurrently.
hadoop_conf.set("fs.s3a.connection.maximum", "100000")
hadoop_conf.set("fs.s3a.endpoint", "s3.us-east-2.amazonaws.com")

sqlContext = pyspark.SQLContext(sc)

def scrape_date(x):
    ts = str(x)[:10]+"."+str(x)[10:]
    p = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(ts)))
    return p

def _basename_token(path):
    # Second underscore-delimited token of the file's basename, or "na" when
    # the name contains no underscore (split computed once, unlike the old
    # lambda which split twice).
    pieces = os.path.basename(path).split("_")
    return pieces[1] if len(pieces) > 1 else "na"

# Spark UDFs used by the pipelines below (default StringType return).
filename_udf = udf(_basename_token)
scrape_date_udf = udf(scrape_date)
# df_parquet = sqlContext.read.parquet("s3a://core-products/TopProducts/batch_01/1.gzip.parquet")
# print(df_parquet.count())
# print("------ splitter ------")

# First snapshot: read every BestSellers JSON file. Unparseable records are
# diverted to badRecordsPath (a Databricks-specific reader option) instead of
# failing the job. Each row is tagged with its source file so the scrape
# timestamp can be recovered from the filename.
df = sqlContext.read.option("badRecordsPath", "/home/ubuntu/bad") \
    .json(["file:///home/ubuntu/amazon_data/BestSellers/*"]) \
    .withColumn("f", input_file_name())

# One row per product: explode the 'products' array and flatten its fields.
flat = df.select(df.f, explode("products").alias('asinlist')).select(
    "f", "asinlist.number", "asinlist.asin", "asinlist.name",
    "asinlist.price", "asinlist.stars", "asinlist.count_reviews",
    "asinlist.dp_url", "asinlist.img_url", "asinlist.product_review_url",
    "asinlist.is_prime")
flat = flat.withColumn("filename", filename_udf(df.f)).drop("f")
flat = flat.withColumn("DataExtractionDate", scrape_date_udf(flat.filename))
# Rename to the output schema and sort by rank. FIX: order by the renamed
# column ("BestSellerRank") instead of the stale pre-select reference
# flat.number, which only resolved by accident via Spark expression-id
# matching against a dataframe that no longer has that column name.
flat = flat.select(
    flat.asin.alias('ASIN'), flat.DataExtractionDate,
    flat.img_url.alias("imageUrl"), flat.number.alias("BestSellerRank"),
    flat.price.alias("Price"),
    flat.count_reviews.alias("CustomerReviewCount"),
    flat.stars.alias("CustomerReviewRating"),
    flat.is_prime.alias("Prime"),
).orderBy("BestSellerRank")

print(flat.count())
flat.show()

# Second snapshot: same transformation as the first pipeline, applied to the
# BestSellers2 directory. (NOTE(review): this duplicates the logic above —
# a shared helper would remove the copy/paste risk.)
df2 = sqlContext.read.option("badRecordsPath", "/home/ubuntu/bad") \
    .json(["file:///home/ubuntu/amazon_data/BestSellers2/*"]) \
    .withColumn("f", input_file_name())

# One row per product: explode the 'products' array and flatten its fields.
flat2 = df2.select(df2.f, explode("products").alias('asinlist')).select(
    "f", "asinlist.number", "asinlist.asin", "asinlist.name",
    "asinlist.price", "asinlist.stars", "asinlist.count_reviews",
    "asinlist.dp_url", "asinlist.img_url", "asinlist.product_review_url",
    "asinlist.is_prime")
flat2 = flat2.withColumn("filename", filename_udf(df2.f)).drop("f")
flat2 = flat2.withColumn("DataExtractionDate", scrape_date_udf(flat2.filename))
# Rename to the output schema and sort by rank. FIX: order by the renamed
# column ("BestSellerRank") instead of the stale pre-select reference
# flat2.number (see the matching note in the first pipeline).
flat2 = flat2.select(
    flat2.asin.alias('ASIN'), flat2.DataExtractionDate,
    flat2.img_url.alias("imageUrl"), flat2.number.alias("BestSellerRank"),
    flat2.price.alias("Price"),
    flat2.count_reviews.alias("CustomerReviewCount"),
    flat2.stars.alias("CustomerReviewRating"),
    flat2.is_prime.alias("Prime"),
).orderBy("BestSellerRank")

print(flat2.count())
flat2.show()

# Merge both snapshots and keep one row per ASIN. Sorting descending before
# dropDuplicates is intended to keep the highest-ranked duplicate, but
# NOTE(review): Spark does not guarantee which row dropDuplicates keeps after
# an orderBy — confirm with a window/groupBy if the surviving row matters.
# Columns are referenced by name so this no longer leans on flat's column
# objects resolving against the union.
flat_union = flat.union(flat2) \
    .orderBy("ASIN", "BestSellerRank", ascending=False) \
    .dropDuplicates(subset=['ASIN'])
print(flat_union.count())
flat_union.show()

# BUG FIX: persist the deduplicated union. The original wrote 'flat',
# silently discarding flat2 and the merge/dedup work done just above.
flat_union.write.save('s3a://core-products/AnalysisResults/output2',
                      format='csv', header=True)