import abc
import datetime
import json

from pandas import DataFrame as PandasDF
from pyspark.sql import SparkSession, DataFrame as SparkDF
import pandas as pd

from common.athena_opt import sql_pandas_data
from constants import *


class FileSourceConfig:
    """Describes a file-based data source: format, location and columns."""

    def __init__(self, file_type, path, columns):
        # File format identifier (e.g. 'csv', 'parquet') handed to the reader.
        self.file_type = file_type
        # Path (local or remote URI) of the file to load.
        self.file_path = path
        # Column names expected in the source.
        self.columns = columns


class SourceReader(abc.ABC):
    """Marker base class shared by every data-source reader."""


class SparkSourceReader(SourceReader, abc.ABC):
    """Interface for readers that produce a Spark DataFrame."""

    @abc.abstractmethod
    def read(self, spark: SparkSession) -> SparkDF:
        """Load the source and return it as a Spark DataFrame."""
        ...


class PandasSourceReader(SourceReader, abc.ABC):
    """Interface for readers that produce a pandas DataFrame."""

    @abc.abstractmethod
    def read(self) -> PandasDF:
        """Load the source and return it as a pandas DataFrame."""
        ...


class SparkSqlSourceReader(SparkSourceReader):
    """Reads data by executing a SQL statement on a Spark session."""

    def __init__(self, sql: str):
        # SQL text executed verbatim via spark.sql().
        self.sql = sql

    def read(self, spark: SparkSession) -> SparkDF:
        """Run the configured SQL on *spark* and return the resulting DataFrame."""
        return spark.sql(self.sql)


class SparkFileSourceReader(SparkSourceReader):
    """Reads data from a file location using the Spark DataFrame reader."""

    def __init__(self, config: FileSourceConfig):
        # Source description: format, path and expected columns.
        self.config = config

    def read(self, spark: SparkSession) -> SparkDF:
        """Load config.file_path in format config.file_type with a header row."""
        reader = spark.read.format(self.config.file_type)
        reader = reader.option("header", "true")
        return reader.load(self.config.file_path)


class PandasSqlSourceReader(PandasSourceReader):
    """Reads data into pandas by running SQL through the Athena helper."""

    def __init__(self, sql: str):
        # SQL text forwarded unchanged to sql_pandas_data().
        self.sql = sql

    def read(self) -> PandasDF:
        """Execute the configured SQL and return the result as a pandas DataFrame."""
        return sql_pandas_data(self.sql)


class PandasFileSourceReader(PandasSourceReader):
    """Reads a file into a pandas DataFrame."""

    def __init__(self, config: FileSourceConfig):
        # Source description: format, path and expected columns.
        self.config = config

    def read(self) -> PandasDF:
        """Parse config.file_path as CSV and return a pandas DataFrame."""
        # NOTE(review): always parses CSV regardless of config.file_type —
        # confirm whether other formats should be supported here.
        return pd.read_csv(self.config.file_path)


def create_reader(common_params, job_params):
    """Instantiate the source reader for the first table in *job_params*.

    common_params selects the execution framework ('spark' or 'sagemaker',
    via Source.FRAMEWORK) and the source type ('sql' or 'file', via
    Source.SOURCE_TYPE); job_params maps table name -> per-table config.
    Only the first table is ever used — the original loop returned on its
    first iteration.

    Returns one of the SourceReader implementations.
    Raises RuntimeError for an unknown framework/source_type or when
    job_params is empty.
    """
    framework = str(common_params[Source.FRAMEWORK]).lower()
    source_type = str(common_params[Source.SOURCE_TYPE]).lower()

    for table, table_conf in job_params.items():
        if source_type == "sql":
            sql = build_sql(table_conf, table)
            if framework == "spark":
                return SparkSqlSourceReader(sql)
            if framework == "sagemaker":
                return PandasSqlSourceReader(sql)
            raise RuntimeError('no framework named', framework)
        if source_type == "file":
            file_conf = FileSourceConfig(
                table_conf[FileSource.FILE_TYPE],
                table_conf[FileSource.FILE_PATH],
                table_conf['columns'],
            )
            if framework == "spark":
                return SparkFileSourceReader(file_conf)
            if framework == "sagemaker":
                return PandasFileSourceReader(file_conf)
            raise RuntimeError('no framework named', framework)
        raise RuntimeError('no source_type named', source_type)

    # BUGFIX: the original kept an unused `readers = []` list and silently
    # returned None when job_params was empty; fail loudly instead.
    raise RuntimeError('job_params is empty, no reader created')


def build_sql(params: dict, table: str, min_day_count: int = 100):
    """Build the read SQL for *table* from the per-table job params.

    params supplies 'columns' plus the filter keys consumed by filter_sql().
    Rows are kept only when their (sn, p_date) partition holds at least
    *min_day_count* records that day — default 100, matching the previously
    hard-coded threshold, so existing callers are unaffected.

    Returns the SQL string (also printed for job-log visibility).
    """
    cols = ",".join(params['columns'])
    start, end, where = filter_sql(params)
    sql = f'''
                    select {cols}
                    from (
                        select {cols},count(1) over(partition by sn, p_date) as day_count
                        from {table}
                        where p_date between '{start}' and '{end}'
                        {where}
                    )
                    where day_count >= {min_day_count}
                    '''
    print("read sql:", sql)
    return sql


def filter_sql(params: dict):
    """Build the date range and SQL WHERE fragment from job params.

    Optional keys: 'date_start'/'date_end' (default: yesterday), and the
    filter pairs 'sn'/'sn_except', 'model'/'model_except',
    'materials'/'materials_except' — each a comma-separated string, with
    the *_except* (exclusion) variant taking precedence over inclusion.
    Required key: 'max_day', the number of days of history to widen the
    start date backwards by.

    Returns (start, end, where): start/end as 'YYYY-MM-DD' strings, and
    where as a concatenation of " and ..." clauses (may be '').
    """

    def _param(key: str) -> str:
        # A missing key and an explicit empty string are equivalent.
        return params.get(key, '')

    # Default run window: yesterday on both ends.
    yesterday = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    date_start = _param('date_start') or yesterday
    date_end = _param('date_end') or yesterday

    clauses = []

    # Serial-number filter; exclusion wins over inclusion.
    sn_except = _param('sn_except')
    sn = _param('sn')
    if sn_except:
        sns = "','".join(sn_except.split(","))
        clauses.append(f" and sn not in ('{sns}') ")
    elif sn:
        sns = "','".join(sn.split(","))
        clauses.append(f" and sn in ('{sns}') ")

    # Model filter via regexp (comma list becomes an alternation pattern).
    model_except = _param('model_except')
    model = _param('model')
    if model_except:
        clauses.append(f''' and regexp_extract(model,'{model_except.replace(',', '|')}',0) = '' ''')
    elif model:
        clauses.append(f''' and regexp_extract(model,'{model.replace(',', '|')}',0) != '' ''')

    # Material-number filter; exclusion wins over inclusion.
    materials_except = _param('materials_except')
    materials = _param('materials')
    if materials_except:
        ms = "','".join(materials_except.split(","))
        clauses.append(f" and material not in ('{ms}') ")
    elif materials:
        ms = "','".join(materials.split(","))
        clauses.append(f" and material in ('{ms}') ")

    # BUGFIX: the original assigned each filter to the same variable, so a
    # later model/materials filter silently discarded an sn filter.
    # Independent filters are now combined; behavior is unchanged when only
    # one filter group is supplied (the documented usage).
    where = ''.join(clauses)

    # Widen the start date backwards by max_day days of history.
    max_days = params['max_day']
    start = (datetime.datetime.strptime(date_start, "%Y-%m-%d")
             - datetime.timedelta(days=max_days)).strftime('%Y-%m-%d')

    return start, date_end, where
