# coding=utf-8
import os
import io
import botocore
import pandas as pd
from boto3 import Session

from core.m_logging import logger
from outputs.parquetFileOutput import ParquetFileOutput

# NOTE(security): these AWS credentials were previously hard-coded in source
# control and must be considered compromised — rotate them. Environment
# variables now take precedence so deployments can inject fresh keys without
# a code change; the old literals remain only as a backward-compatible
# fallback until rotation is complete.
aws_key = os.environ.get("AWS_ACCESS_KEY_ID", "AKIAIOFGB7H4GDSE5NJQ")
aws_secret = os.environ.get("AWS_SECRET_ACCESS_KEY", "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")
bucket_name = os.environ.get("S3_BUCKET_NAME", "core-products")

# Module-level S3 handles shared by every ParquetS3Output instance.
session = Session(aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)


class ParquetS3Output(object):
    """Buffers rows in memory and flushes them to a parquet object on S3.

    Rows are accumulated column-wise in ``self.dict`` (column name -> list of
    values). Once ``flush_size`` rows are cached, the buffer is appended to
    the existing parquet object at ``s3_key`` (or a new object is created if
    none exists) and the buffer is reset.
    """

    def __init__(self, s3_key='', flush_size=1000):
        # Column-oriented buffer: {column_name: [values, ...]}.
        self.dict = {}
        self.flush_size = flush_size
        # Number of rows currently buffered.
        self.length = 0
        self.s3_key = s3_key

    def cache(self, kv_pairs):
        """Buffer one row (a {column: value} mapping); flush when full.

        :param kv_pairs: dict mapping column names to this row's values.
        """
        logger.info("caching kv_pairs...")
        # .items() works on both Python 2 and 3 (iteritems() is py2-only).
        for column, value in kv_pairs.items():
            self.dict.setdefault(column, []).append(value)
        self.length += 1
        logger.info("kv_pairs is cached, current length is %s, kv_pairs: omitted" % self.length)
        if self.length >= self.flush_size:
            self.flush()

    def flush(self):
        """Merge buffered rows with the existing S3 object and rewrite it.

        Missing-key errors from S3 are treated as "first write"; any other
        S3 error is re-raised so the existing object is never clobbered
        with only the in-memory buffer.
        """
        if not self.length:
            # Nothing buffered — avoid a pointless read-modify-write cycle.
            return
        logger.info("flushing parquet to s3...")

        df_to_append = pd.DataFrame(self.dict)
        try:
            df_from_s3 = self._read_from_s3()
            # pd.concat replaces DataFrame.append (removed in pandas 2.0).
            df_to_disk = pd.concat([df_from_s3, df_to_append], ignore_index=True)
        except botocore.exceptions.ClientError as ex:
            code = ex.response['Error']['Code']
            # download_fileobj reports a missing key as '404'; Object.get()
            # reports it as 'NoSuchKey' — accept both as "object not there yet".
            if code not in ('NoSuchKey', '404'):
                # Propagate real failures: silently writing only the buffer
                # here would destroy the rows already stored on S3.
                raise
            logger.info("no such key: %s" % self.s3_key)
            df_to_disk = df_to_append

        self._write_to_s3(df_to_disk)
        self.dict = {}
        self.length = 0

    def output(self, content):
        """Extract one row per ASIN entry from *content* and cache it.

        :param content: scrape payload; expected to carry an "ansiList" of
            per-product dicts plus "node_no"/"page_no"/"scrape_date".
        """
        logger.info('output file to parquet file s3://%s/%s...' % (bucket_name, self.s3_key))
        if "ansiList" not in content:
            logger.error("ansiList not in content")
            return
        # Everything is stringified so the parquet schema stays uniform.
        for asin in content["ansiList"]:
            d = {"asin": str(asin["asin"]),
                 "summary_url": str(asin["product_summary_url"]),
                 "review_url": str(asin["review_url"]),
                 "product_name": str(asin["product_name"]),
                 "product_image_url": str(asin["product_image_url"]),
                 "product_rating": str(asin["product_rating"]),
                 "customer_review": str(asin["customer_review"]),
                 "product_price": str(asin["product_price"]),
                 "shipping": str(asin["shipping"]),
                 "stock": str(asin["stock"]),
                 "choice_type": str(asin["choice"]["choice_type"]),
                 "choice_count": str(asin["choice"]["choice_count"]),
                 "choice_price": str(asin["choice"]["choice_price"]),
                 "best_seller": str(asin["best_seller"]),
                 "node_no": str(content["node_no"]),
                 "page_no": str(content["page_no"]),
                 "scrape_date": str(content["scrape_date"])}
            self.cache(d)

    def _read_from_s3(self):
        """Download the parquet object at ``s3_key`` into a DataFrame.

        :raises botocore.exceptions.ClientError: if the object is missing
            or the download fails.
        """
        logger.info("reading from s3://%s/%s..." % (bucket_name, self.s3_key))
        obj = bucket.Object(key=self.s3_key)
        bf = io.BytesIO()
        obj.download_fileobj(bf)
        # Rewind so the parquet engine reads from the start of the buffer.
        bf.seek(0)
        return pd.read_parquet(bf)

    def _write_to_s3(self, df):
        """Write *df* as a gzip-compressed parquet object to ``s3_key``.

        NOTE: the s3:// URL form requires s3fs to be installed.
        """
        logger.info("writing to s3://%s/%s..." % (bucket_name, self.s3_key))
        df.to_parquet("s3://%s/%s" % (bucket_name, self.s3_key), compression='gzip')
