# coding=utf-8
import io

import boto3
import pandas as pd
import fastparquet
from boto3 import Session

from outputs.parquetFileOutput import ParquetFileOutput


# Read the previously exported dataset straight from S3 (pandas resolves the
# s3:// URL through s3fs using the ambient AWS credentials).
# NOTE(review): `d` is never used below — confirm whether this read is still
# needed or can be dropped.
d = pd.read_parquet("s3://core-products/TopProducts/df3.gzip.parquet")

buffer = io.BytesIO()
# SECURITY: AWS credentials were previously hard-coded here. Never commit
# secrets — rely on boto3's default credential chain instead (environment
# variables AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY, ~/.aws/credentials,
# or an instance/role profile). The leaked key pair must be rotated.
session = Session()
s3 = session.resource('s3')
bucket = s3.Bucket("core-products")
obj = bucket.Object(key="TopProducts/df.gzip.parquet")
# Stream the parquet object into an in-memory buffer, then parse it.
obj.download_fileobj(buffer)
buffer.seek(0)  # rewind: download_fileobj leaves the cursor at end-of-stream
df = pd.read_parquet(buffer)



# Persist the downloaded frame back to S3 as a gzip-compressed parquet file.
df.to_parquet("s3://core-products/TopProducts/df3.gzip.parquet", compression='gzip')

print(df.head())

# Push two sample records through the ParquetFileOutput pipeline, flushing
# after each one so each record is written out immediately.
data = {"asin": "asin_01", "review_url": "review_url_1"}
data2 = {"asin": "asin_02", "review_url": "review_url_2"}

pfo = ParquetFileOutput()
for record in (data, data2):
    pfo.cache(record)
    pfo.flush()

# Build a small demo frame from a dict of columns.
df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
df.to_parquet('df.gzip.parquet', compression='gzip')

# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to stack frames row-wise.
df = pd.concat(
    [df, pd.DataFrame(data={'col1': [11, 22], 'col2': [33, 44]})],
    ignore_index=True,
)
df.to_parquet('df.gzip.parquet', compression='gzip')

# Round-trip: read the file back and show the combined contents.
df = pd.read_parquet('df.gzip.parquet')
print(df)