import json
import os

import boto3
from boto3 import Session

import pyspark
from pyspark import SparkConf, SparkContext
from pyspark.sql.functions import explode, input_file_name, max, mean, min, row_number, udf
from pyspark.sql.types import FloatType
from pyspark.sql.window import Window

# --- Spark context + S3A setup -------------------------------------------
# Stop any pre-existing SparkContext (e.g. left over from an earlier
# notebook cell) before creating a fresh local one. Guarded so a cold start
# as a plain script does not crash with NameError on an undefined `sc`.
try:
    sc.stop()
except NameError:
    pass
conf = SparkConf().setMaster("local").setAppName("My App")
sc = SparkContext(conf=conf)

# SECURITY: never hard-code AWS credentials in source control. Read them
# from the standard AWS environment variables instead. (The key pair that
# was previously committed here must be treated as leaked and rotated.)
access_id = os.environ.get("AWS_ACCESS_KEY_ID", "")
access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")

# Configure the Hadoop S3A connector so Spark can read s3a:// paths.
hadoop_conf = sc._jsc.hadoopConfiguration()
hadoop_conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
# us-east-2 requires Signature Version 4 signing.
hadoop_conf.set("com.amazonaws.services.s3.enableV4", "true")
hadoop_conf.set("fs.s3a.access.key", access_id)
hadoop_conf.set("fs.s3a.secret.key", access_key)

# Very large connection cap — presumably to avoid pool exhaustion when many
# small files are fetched in parallel; TODO confirm this is still required.
hadoop_conf.set("fs.s3a.connection.maximum", "100000")

hadoop_conf.set("fs.s3a.endpoint", "s3.us-east-2.amazonaws.com")
sqlContext = pyspark.SQLContext(sc)

filename_udf = udf(lambda x: os.path.basename(x).split("_")[1] if len(os.path.basename(x).split("_")) > 1 else "na")

def _load_scrape(directory):
    """Load one Best Sellers scrape directory as a JSON DataFrame.

    Malformed records are diverted to the badRecordsPath directory instead of
    failing the load; the originating file path is kept in column "f" so the
    source file can be recovered per row.
    """
    reader = sqlContext.read.option("badRecordsPath", "/home/ubuntu/bad")
    frame = reader.json(["file:///home/ubuntu/amazon_data/" + directory + "/*"])
    return frame.withColumn("f", input_file_name())

df = _load_scrape("BestSellers")
df2 = _load_scrape("BestSellers2")

# Explode the nested "products" array so each product entry becomes its own
# row, then project the product attributes to top-level columns.
# (An earlier variant also tagged rows with the source filename via
# filename_udf; reinstate by selecting "f" and applying the UDF if needed.)
_product_fields = ["number", "asin", "name", "price", "stars",
                   "count_reviews", "dp_url", "img_url", "product_review_url"]
flat = (df
        .select(explode("products").alias("asinlist"))
        .select(*["asinlist." + field for field in _product_fields]))
print(flat.count())
flat.show()

# Same flattening for the second scrape: one row per product entry, with the
# product attributes promoted to top-level columns.
_fields2 = ["number", "asin", "name", "price", "stars",
            "count_reviews", "dp_url", "img_url", "product_review_url"]
flat2 = (df2
         .select(explode("products").alias("asinlist"))
         .select(*["asinlist." + field for field in _fields2]))
print(flat2.count())
# Combine both scrapes, keeping exactly one row per ASIN.
#
# BUG FIX: the previous orderBy(...).dropDuplicates(subset=["asin"]) pattern
# is nondeterministic — Spark's dropDuplicates makes no guarantee about WHICH
# duplicate survives, even after a global sort. Use a window partitioned by
# asin and ranked by "number" descending (matching the old ascending=False
# intent) so the surviving row per ASIN is well-defined.
combined = flat.union(flat2)
rank_window = Window.partitionBy("asin").orderBy(combined["number"].desc())
flat_union = (combined
              .withColumn("_rn", row_number().over(rank_window))
              .where("_rn = 1")
              .drop("_rn")
              .orderBy("asin", "number", ascending=False))
print(flat_union.count())
flat_union.show(351)