import boto3
from boto3 import Session

import os, json, pyspark
# import findspark
# findspark.init()
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import explode, udf, mean, max, min, input_file_name
from pyspark.sql.types import FloatType

# --- Spark context + S3A configuration --------------------------------------
# Stop any pre-existing context (this script is often pasted into a pyspark
# shell where `sc` already exists).  In a standalone run `sc` is undefined,
# so guard the stop instead of crashing with NameError.
try:
    sc.stop()
except NameError:
    pass

conf = SparkConf().setMaster("local").setAppName("My App")
sc = SparkContext(conf=conf)

# SECURITY: AWS credentials were hard-coded here.  Prefer the environment
# (or an instance profile); the literals remain only as a backward-compatible
# fallback and should be rotated and removed.
access_id = os.environ.get("AWS_ACCESS_KEY_ID", "AKIAIOFGB7H4GDSE5NJQ")
access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "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")

# Route s3a:// URIs through the Hadoop S3A filesystem with SigV4 signing.
# see https://stackoverflow.com/questions/43454117/how-do-you-use-s3a-with-spark-2-1-0-on-aws-us-east-2
hadoop_conf = sc._jsc.hadoopConfiguration()
hadoop_conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
hadoop_conf.set("com.amazonaws.services.s3.enableV4", "true")
hadoop_conf.set("fs.s3a.access.key", access_id)
hadoop_conf.set("fs.s3a.secret.key", access_key)
# Large connection pool: the job reads many small JSON objects from S3.
hadoop_conf.set("fs.s3a.connection.maximum", "100000")
# us-east-2 buckets need an explicit regional endpoint for V4 signing.
hadoop_conf.set("fs.s3a.endpoint", "s3.us-east-2.amazonaws.com")

sqlContext = pyspark.SQLContext(sc)
# columns = ['nodeUrl', 'nextPageUrl']
# df = sqlContext.read.json("file:///home/ubuntu/spark-2.4.0-bin-hadoop2.7/amazon_ansi_1046036_1.json").toDF(*columns)
def get_price(x):
    """Parse a price string like ``"$12.99"`` and return twice its value.

    The first character (currency symbol) is dropped and thousands
    separators are tolerated, so ``"$1,299.00"`` parses cleanly instead of
    raising ValueError.

    NOTE(review): the ``* 2`` multiplier is preserved from the original
    implementation — confirm it is intended and not a leftover experiment.

    :param x: price string with a one-character currency prefix.
    :returns: the numeric price times two, as a float.
    :raises ValueError: if the remainder is not a valid number.
    """
    p = x[1:].replace(",", "")
    return float(p) * 2
    
# --- Column-level helpers wrapped as Spark UDFs (all return strings) --------

def _strip_symbol(s):
    # "$12.99" -> "12.99": drop the leading currency character.
    return s[1:]

def _rating_value(s):
    # First whitespace-separated token, e.g. "4.5 out of 5 stars" -> "4.5".
    return s.split(" ")[0]

def _trim_filename(path):
    # Basename with its first 12 characters removed (the "amazon_ansi_"
    # prefix seen in the sample file names).
    return os.path.basename(path)[12:]

def _parent_dir(path):
    # Absolute path of the containing directory.
    return os.path.abspath(path + "/..")

def _date_part(path):
    # Third-from-last path component (the date directory).
    return path.split("/")[-3]

def _node_part(path):
    # Second-from-last path component (the node-id directory).
    return path.split("/")[-2]

def _page_part(path):
    # Trailing page number: last "_"-separated piece of the file name,
    # with the ".json" extension stripped.
    return path.split("/")[-1].split("_")[-1].replace(".json", "")

price_udf = udf(_strip_symbol)
rating_udf = udf(_rating_value)
filename_udf = udf(_trim_filename)
parent_udf = udf(_parent_dir)
date_udf = udf(_date_part)
node_id_udf = udf(_node_part)
page_udf = udf(_page_part)
# "s3a://AKIAIOFGB7H4GDSE5NJQ:MdMrUky4EUaeCUhJD7fRVypPRbampGszywjzUiat@test4jimmy/2019-03-23/6459248011/amazon_ansi_6459248011_4.json"
# Load every scraped JSON page into one DataFrame.  Malformed records are
# diverted to badRecordsPath instead of failing the read; input_file_name()
# tags each row with its source path so node id / page / date can be
# recovered from the directory layout below.
df = sqlContext.read.option("badRecordsPath", "/home/ubuntu/bad").json(["file:///home/ubuntu/spark-2.4.0-bin-hadoop2.7/samples2/*"]).withColumn("f", input_file_name())
# df = sqlContext.read.option("badRecordsPath", "/home/ubuntu/bad").json("s3a://core-products/TopProducts/2019-04-03/1095338/*").withColumn("f", input_file_name())
# df = sqlContext.read.option("badRecordsPath", "/home/ubuntu/bad").json("s3a://AKIAIOFGB7H4GDSE5NJQ:MdMrUky4EUaeCUhJD7fRVypPRbampGszywjzUiat@test4jimmy/2019-04-01/10079988011/amazon_ansi_10079988011_1.json").withColumn("f", input_file_name())
# df = sqlContext.read.json("file:///home/ubuntu/spark-2.4.0-bin-hadoop2.7/samples2/*")
# One row per product: explode the ansiList array, then keep the asin,
# price and rating fields alongside the status code and source path.
flat = df.select("status_code", "f", explode("ansiList").alias('asinlist'))\
.select("status_code", "f", "asinlist.asin", "asinlist.product_price", "asinlist.product_rating")
# Derive cleaned columns: numeric-ish price/rating strings from the raw
# fields, plus node id / page number / date parsed out of the file path.
flat = flat.withColumn("price", price_udf(flat.product_price))\
.withColumn("rating", rating_udf(flat.product_rating))\
.withColumn("nodeid", node_id_udf(flat.f))\
.withColumn("pageno", page_udf(flat.f))\
.withColumn("date", date_udf(flat.f))

# flat = flat.select("nodeid")
# Per-node aggregates: max rating and mean price.  NOTE(review): price and
# rating are produced by string-returning UDFs above — confirm Spark's
# implicit numeric coercion in agg() is the intended behavior.
flat = flat.groupBy("nodeid").agg({'rating': 'max', 'price': 'mean'})
# .withColumn("parent", parent_udf(flat.f))
# .withColumn("filename", filename_udf(flat.f))\

# .drop("f")
# flat = flat.drop("product_price").drop("product_rating")

# flat = flat.select(mean('price'), mean('rating'))
# flat.write.csv('output2.csv')
# flat.write.save('s3a://AKIAIOFGB7H4GDSE5NJQ:MdMrUky4EUaeCUhJD7fRVypPRbampGszywjzUiat@test4jimmy2/output', format='csv', header=True)
# flat.write.save('s3a://core-products/AnalysisResults/output.json', format='csv', header=True)
# Trigger the job and inspect results: row count (one row per node id),
# the aggregated schema, and a sample of the output.
print(flat.count())
flat.printSchema()
flat.show()


# flat.write.csv('output2.csv')
# flat.repartition(1).write.format("com.databricks.spark.csv").save("output.csv", header = "true")
# print(flat.())
# r = json_list.rdd.map(lambda json: json["ansiList"])
# rows = r.collect()
# print(rows)
# rows = df.collect()
# print(rows[0]["ansiList"][0]["asin"])
# print("df count is " + str(df.count()))
# df.printSchema()
# df.createOrReplaceTempView("ansis")
# tempdf = sqlContext.sql("SELECT ansiList[0]['asin'] FROM ansis")
# tempdf.show()
# df.select(df["ansiList"][1]["asin"]).show()
# df.show()
# data = sqlContext.read.json("s3n://AKIAIOFGB7H4GDSE5NJQ:MdMrUky4EUaeCUhJD7fRVypPRbampGszywjzUiat@test4jimmy/2019-03-23/6459248011/amazon_ansi_6459248011_4.json")
# sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", "AKIAIOFGB7H4GDSE5NJQ")
# sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", "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")
# rdd = sc.textFile("s3n://AKIAIOFGB7H4GDSE5NJQ:MdMrUky4EUaeCUhJD7fRVypPRbampGszywjzUiat@test4jimmy/2019-03-23/6459248011/amazon_ansi_6459248011_4.json").cache()

# wordcount
# text_file = sc.textFile("file:///home/ubuntu/spark-2.4.0-bin-hadoop2.7/README.md")
# counts = text_file.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a + b)
# print(counts.collect())

# parsed = rdd.map(lambda k,v: json.loads(v))
# summed = parsed.map(lambda detail:list.append((String(['nodeUrl']), String(['mid']), String(['dsrc']))))
# print(rdd.collect().count)
# rdd = sc.hadoopFile("s3n://test4jimmy/2019-03-23/6459248011/amazon_ansi_6459248011_4.json", 
#                     "org.apache.hadoop.mapred.TextInputFormat",
#                     "org.apache.hadoop.io.Text",
#                     "org.apache.hadoop.io.LongWritable", 
#                     conf = {
#     "fs.s3n.awsAccessKeyId": "AKIAIOFGB7H4GDSE5NJQ",
#     "fs.s3n.awsSecretAccessKey": "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"
# })
# x = distData.reduce(lambda a, b: a+b)
# print(x)
# Release the SparkContext before exiting.
sc.stop() 