import sys

from pyspark.sql import Window
from pyspark.sql.functions import lag
import pyspark.sql.functions as f

def date_diff(x):
    """Decide whether a row's date_of_birth differs from the lagged previous one.

    Parameters
    ----------
    x : pyspark.sql.Row or mapping
        Must provide keys 'id', 'name', 'date_of_birth', 'order_item' and
        'prev_date'. 'prev_date' is the lagged date_of_birth; the lag
        default makes it the string '0' on the first row of a partition,
        and it may also be the literal string 'nan'.

    Returns
    -------
    tuple
        (id, name, date_of_birth, order_item, prev_date, is_diff) where
        is_diff is 1 when the current date is considered different from
        the previous one, else 0.
    """
    cur_date = x['date_of_birth'].split('-')
    prev_date = x['prev_date'].split('-')
    is_diff = 0
    if x['prev_date'] in ('nan', '0'):
        # First row of a partition (lag default) or an explicit missing value.
        is_diff = 1
    elif len(cur_date) > len(prev_date) and len(cur_date) == 2 and cur_date[0] != prev_date[0]:
        # Current is year-month, previous is year only: compare the year.
        is_diff = 1
    elif len(cur_date) > len(prev_date) and len(cur_date) == 3 and (
            cur_date[0] != prev_date[0]
            # BUGFIX: prev_date may have a single component here; indexing
            # prev_date[1] unconditionally raised IndexError when the years
            # matched. An absent month with an equal year counts as "same",
            # consistent with the two-component branch above.
            or (len(prev_date) > 1 and cur_date[1] != prev_date[1])):
        is_diff = 1
    elif len(cur_date) == len(prev_date) and len(cur_date) == 3 and cur_date != prev_date:
        # Same-length comparison: any differing component marks a new date.
        is_diff = 1
    elif cur_date[0] == 'no value':
        is_diff = 1
    return (x['id'], x['name'], x['date_of_birth'], x['order_item'], x['prev_date'], is_diff)


def merge_order_history(df):
    """Merge consecutive orders whose date_of_birth is considered unchanged.

    For each id, rows are ordered by date_of_birth; date_diff flags every
    row where the date differs from the previous row's, and a running sum
    of those flags yields a group key per run of "equal" dates. Within a
    group, order_item values are concatenated into one comma-separated
    string and the last name/date_of_birth are kept.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
        Must contain columns id, name, date_of_birth, order_item.

    Returns
    -------
    pyspark.sql.DataFrame
        One row per merged group, columns: id, name, date_of_birth,
        order_item. Also prints the result via show() (debug side effect
        kept from the original; consider removing in production).
    """
    # 'id' in orderBy was redundant inside a partition keyed by 'id'.
    my_window = Window.partitionBy('id').orderBy('date_of_birth')
    # lag's default 0 is cast to the string '0' on this string column;
    # date_diff interprets it as "no previous row".
    df = df.withColumn("prev_date", lag("date_of_birth", 1, 0).over(my_window))

    rdd = df.rdd.map(date_diff)
    df = rdd.toDF(['id', 'name', 'date_of_birth', 'order_item', 'prev_date', 'diff'])

    # Running sum of the diff flags. Window.unboundedPreceding/currentRow is
    # the documented way to express this frame (replaces -sys.maxsize).
    df = df.withColumn("groupkey", f.sum(f.col("diff")).over(
        Window.partitionBy('id').orderBy('date_of_birth')
        .rowsBetween(Window.unboundedPreceding, Window.currentRow)))

    # alias() the aggregates directly instead of renaming last(name) etc.
    grouped_df = df.groupby('id', 'groupkey').agg(
        f.last('name').alias('name'),
        f.last('date_of_birth').alias('date_of_birth'),
        f.collect_list('order_item').alias('order_item'),
    )
    grouped_df = grouped_df.withColumn("order_item", f.array_join("order_item", ","))
    # select() already excludes groupkey, so no separate drop() is needed.
    grouped_df = grouped_df.select('id', 'name', 'date_of_birth', 'order_item')
    grouped_df.show()
    return grouped_df