import pyarrow as pa
import pyarrow.parquet as pq
import boto3
from boto3.s3.transfer import TransferConfig
from botocore.client import Config


from src.py_data_sync.source import AbsSource


class LocalFsSink:
    """Sink that streams rows from a source into a local parquet file."""

    def __init__(self, source: AbsSource, conf: dict) -> None:
        self.source = source
        self.conf = conf

    def load(self) -> None:
        """
        Iterate the source (cursor.fetchmany batches) to get record rows
        and write them to the parquet file at ``conf["path"]``.
        """
        self.source.execute()
        path = self.conf["path"]
        desc = self.source.desc
        schema = self.source.schema
        # Column names are the first element of each cursor-description
        # entry (DB-API 2.0); compute them once instead of indexing
        # desc[i][0] for every cell of every row.
        columns = [col[0] for col in desc]
        with pq.ParquetWriter(path, schema=schema) as writer:
            for rows in self.source:
                data = [dict(zip(columns, row)) for row in rows]
                table = pa.Table.from_pylist(data, schema=schema)
                writer.write_table(table)

    def persist(self, path: str = ".persist.parquet") -> str:
        """
        Persist the source to a local parquet file for other sinks.

        :param path: destination file; defaults to the legacy
            ``.persist.parquet`` location so existing callers keep working.
        :return: the path written, so callers no longer need to hard-code it.

        NOTE: intentionally mutates ``self.conf["path"]`` — other sinks
        share this conf dict and read the persisted path from it.
        """
        self.conf["path"] = path
        self.load()
        return path


class S3Sink:
    """Sink that persists the source to a local parquet file, then
    uploads it to S3 with a multipart transfer."""

    def __init__(self, source: AbsSource, conf: dict) -> None:
        self.source = source
        self.conf = conf
        # Shares the same conf dict, so the local sink's persist() also
        # updates conf["path"] for this sink.
        self.localfs_sink = LocalFsSink(source, conf)
        # Backward-compat alias for the old misspelled attribute name.
        self.loaclfs_sink = self.localfs_sink

    def get_s3_client(self):
        """Build a boto3 S3 client from conf credentials and endpoint."""
        return boto3.client(
            "s3",
            aws_access_key_id=self.conf["access_key"],
            # BUG FIX: the boto3 keyword is "aws_secret_access_key";
            # "aws_secret_key" is not accepted and raised a TypeError
            # at client construction, so load() could never run.
            aws_secret_access_key=self.conf["secret_key"],
            endpoint_url=self.conf["endpoint"],
            config=Config(s3={"addressing_style": "virtual"}))

    def load(self):
        """
        Use localfs_sink to persist a parquet file locally, then
        upload_fileobj it to s3://{conf[bucket]}/{conf[key]}.
        """
        self.s3_client = self.get_s3_client()
        # Writes ".persist.parquet" (and records it in conf["path"]).
        self.localfs_sink.persist()

        # Multipart tuning; overridable via conf, defaults preserve the
        # original hard-coded values. NOTE(review): 100 / 16 are bytes
        # here, far below S3's 5 MiB multipart part minimum — confirm
        # whether these were meant to be megabytes.
        threshold = self.conf.get("multipart_threshold", 100)
        chunksize = self.conf.get("multipart_chunksize", 16)
        concurrency = self.conf.get("max_concurrency", 4)
        bucket = self.conf["bucket"]
        key = self.conf["key"]

        with open(".persist.parquet", "rb") as f:
            self.s3_client.upload_fileobj(f, bucket, key, Config=TransferConfig(
                multipart_threshold=threshold,
                multipart_chunksize=chunksize,
                max_concurrency=concurrency,
            ))