import datetime
import json
import platform
from typing import Iterable

import pandas as pd
from pyspark import Row, SparkConf
from pyspark.sql import SparkSession


def spark_rows_to_df(rows: "Iterable[Row]") -> list:
    """Collect Spark ``Row`` objects into a single pandas DataFrame.

    Column order follows the first row's ``__fields__``; all rows are
    assumed to share that first row's schema. An empty input yields an
    empty DataFrame.

    :param rows: iterable of pyspark ``Row`` objects.
    :return: one-element list containing the DataFrame (the list wrapper
             is kept for backward compatibility with existing callers).
    """
    field_names = []
    columns = {}
    for row in rows:
        if not field_names:
            # Lazily initialise the schema from the first row seen.
            field_names = list(row.__fields__)
            columns = {name: [] for name in field_names}
        for name, value in zip(field_names, row):
            columns[name].append(value)
    df = pd.DataFrame(columns)
    return [df]


def get_spark_session(name: str) -> "SparkSession":
    """Create (or reuse) a SparkSession configured for this project.

    On Windows the session runs in local mode without Hive support
    (local development); elsewhere it is assumed to run under YARN
    with Hive enabled.

    :param name: Spark application name.
    :return: an active SparkSession.
    """
    conf = SparkConf()
    conf.setAppName(name)
    # Decide the platform once so both branches below agree.
    is_windows = 'windows' in platform.system().lower()
    if is_windows:
        conf.setMaster('local[*]')
    conf.set('spark.dynamicAllocation.enabled', 'false')
    # Overwrite only the partitions present in the written data.
    conf.set('spark.sql.sources.partitionOverwriteMode', 'DYNAMIC')
    conf.set('hive.exec.dynamic.partition.mode', 'nonstrict')
    # Fail fast: do not let YARN retry the whole application.
    conf.set('spark.yarn.maxAppAttempts', '0')
    builder = SparkSession.builder.config(conf=conf)
    if not is_windows:
        builder = builder.enableHiveSupport()
    spark = builder.getOrCreate()

    # Set the log level
    spark.sparkContext.setLogLevel("INFO")
    return spark


def get_filter(args):
    """Parse the job's CLI argument (a JSON string) into SQL filter pieces.

    Expected JSON keys (all optional; empty values are ignored):
    ``date_start`` / ``date_end`` (``%Y-%m-%d``), ``materials`` (comma-
    separated material ids), ``model`` / ``model_except`` (comma-separated
    model patterns). Only one of ``model_except`` / ``model`` /
    ``materials`` is honoured, in that order of precedence.

    :param args: argv-style list; ``args[1]``, when present, is the JSON payload.
    :return: tuple ``(date_start, date_end, model_where, part)`` where
             ``model_where`` is a SQL ``AND`` fragment (possibly empty) and
             ``part`` is the repartition count for the date range.
    """
    # args[1] carries the JSON payload; default to no overrides.
    inp = json.loads(args[1]) if len(args) > 1 else {}

    # Both run dates default to yesterday.
    yesterday = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    date_start = inp.get('date_start') or yesterday
    date_end = inp.get('date_end') or yesterday
    # Material filter: when nothing is passed in, run over all data.
    materials = inp.get('materials') or ''
    model = inp.get('model') or ''
    model_except = inp.get('model_except') or ''

    # Only one of model_except / model / materials should be supplied.
    model_where = ''
    if model_except:
        # Keep rows whose model matches NONE of the given patterns.
        model_where = f''' and regexp_extract(model,'{model_except.replace(',', '|')}',0) = '' '''
    elif model:
        # Keep rows whose model matches ANY of the given patterns.
        model_where = f''' and regexp_extract(model,'{model.replace(',', '|')}',0) != '' '''
    elif materials:
        ms = "','".join(materials.split(","))
        model_where = f" and material in ('{ms}') "

    days = (datetime.datetime.strptime(date_end, "%Y-%m-%d")
            - datetime.datetime.strptime(date_start, "%Y-%m-%d")).days
    # 2000 partitions per day of data (~30k rows per partition, to speed up
    # the sort). NOTE(review): the original comment said 1000 per day while
    # the code multiplies by 2000 -- confirm the intended factor.
    part = (days + 1) * 2000

    return date_start, date_end, model_where, part
