from datetime import datetime
from typing import Any

import pandas as pd
import pyspark.sql as ps
from pyspark.sql import SparkSession
from triad.collections.schema import Schema

import fugue.api as fi
import fugue.test as ft
from fugue.dataframe.pandas_dataframe import PandasDataFrame
from fugue.plugins import get_column_names, rename, infer_execution_engine
from fugue_spark._utils.convert import to_schema, to_spark_df, to_spark_schema
from fugue_spark.dataframe import SparkDataFrame
from fugue_test.dataframe_suite import DataFrameTests


class SparkDataFrameTestsBase(DataFrameTests.Tests):
    """Run the standard Fugue DataFrame test suite against ``SparkDataFrame``."""

    @property
    def spark_session(self) -> SparkSession:
        # The active session is provided by the fugue test context.
        return self.context.session

    def df(self, data: Any = None, schema: Any = None) -> SparkDataFrame:
        # Delegate construction to the suite's execution engine.
        result = self.engine.to_df(data, schema=schema)
        return result

    def test_alter_columns_invalid(self):
        # Disabled: Spark silently casts invalid data to nulls instead of
        # raising, so the suite's expected exception never happens.
        pass


@ft.fugue_test_suite("spark", mark_test=True)
class SparkDataFrameTests(SparkDataFrameTestsBase):
    """Concrete suite: runs the inherited tests under the "spark" backend."""

    pass


class NativeSparkDataFrameTestsBase(DataFrameTests.NativeTests):
    """Run the native-DataFrame test suite against raw ``pyspark`` DataFrames."""

    @property
    def spark_session(self) -> SparkSession:
        # The active session is provided by the fugue test context.
        return self.context.session

    def df(self, data: Any = None, schema: Any = None):
        # Build a fugue dataframe, then unwrap to the underlying Spark object.
        fugue_df = self.engine.to_df(data, schema=schema)
        return fugue_df.native

    def to_native_df(self, pdf: pd.DataFrame) -> Any:
        # Convert a pandas frame into a native Spark DataFrame.
        return to_spark_df(self.spark_session, pdf)

    def test_not_local(self):
        # A Spark-backed dataframe must not be considered local.
        native = self.df([], "a:int,b:str")
        assert not fi.is_local(native)

    def test_alter_columns_invalid(self):
        # Disabled: Spark silently casts invalid data to nulls instead of
        # raising, so the suite's expected exception never happens.
        pass


@ft.fugue_test_suite("spark", mark_test=True)
class NativeSparkDataFrameTests(NativeSparkDataFrameTestsBase):
    """Concrete suite: runs the inherited tests under the "spark" backend."""

    pass


def test_infer_engine(spark_session):
    """Engine inference resolves to the Spark session type when any input is Spark-backed."""
    pandas_df = pd.DataFrame([[0]], columns=["a"])
    spark_df = spark_session.createDataFrame(pandas_df)
    fugue_df = SparkDataFrame(spark_df)

    # No inputs -> nothing to infer.
    assert infer_execution_engine([]) is None

    # Any Spark-backed input (fugue wrapper, native, or mixed with pandas)
    # should resolve to the session's type.
    session_type = type(spark_session)
    for inputs in ([fugue_df], [spark_df], [pandas_df, spark_df]):
        assert isinstance(infer_execution_engine(inputs), session_type)


def test_init(spark_session):
    """SparkDataFrame construction with inferred and explicit schemas."""
    raw = spark_session.createDataFrame([["a", 1]])
    df = SparkDataFrame(raw, "a:str,b:double")
    # The explicit schema coerces the int column to double.
    assert df.as_array() == [["a", 1.0]]
    assert df.as_pandas().values.tolist() == [["a", 1.0]]
    assert not df.is_local
    assert df.is_bounded
    assert df.num_partitions > 0

    # Schema inferred from data.
    df = _df([["a", 1], ["b", 2]])
    assert df.as_array() == [["a", 1], ["b", 2]]

    # Empty data with an explicit schema.
    df = _df([], "a:str,b:str")
    assert df.as_array() == []
    assert df.schema == "a:str,b:str"

    # Ints coerced to strings by the schema.
    df = _df([["a", 1], ["b", 2]], "a:str,b:str")
    assert df.as_array() == [["a", "1"], ["b", "2"]]
    assert df.schema == "a:str,b:str"


def test_nested(spark_session):
    """Nested (array and struct) column types round-trip through SparkDataFrame."""
    # Array column: values survive both type-safe and non-type-safe paths.
    data = [[[10, 20]]]
    sdf = spark_session.createDataFrame(data, to_spark_schema("a:[int]"))
    df = SparkDataFrame(sdf)
    for type_safe in (False, True):
        assert data == df.as_array(type_safe=type_safe)
        assert data == list(df.as_array_iterable(type_safe=type_safe))

    # Struct column: a field absent from the input dict comes back as None.
    data = [[dict(b=[30, 40])]]
    sdf = spark_session.createDataFrame(data, to_spark_schema("a:{a:str,b:[int]}"))
    df = SparkDataFrame(sdf)
    expected = [[dict(a=None, b=[30, 40])]]
    for type_safe in (False, True):
        assert expected == df.as_array(type_safe=type_safe)
        assert expected == list(df.as_array_iterable(type_safe=type_safe))


def _test_as_array_perf():
    """Disabled (leading underscore) ad-hoc benchmark comparing ``as_array``
    with and without ``type_safe`` on a wide 300-column, 5000-row frame.

    Run manually; prints (non-type-safe seconds, type-safe seconds).
    """
    # Build a schema of 100 int, 100 int (filled with floats), and 100 str
    # columns, plus one template row matching it.
    s = Schema()
    arr = []
    for i in range(100):
        s.append(f"a{i}:int")
        arr.append(i)
    for i in range(100):
        s.append(f"b{i}:int")
        arr.append(float(i))
    for i in range(100):
        s.append(f"c{i}:str")
        arr.append(str(i))
    data = []
    for i in range(5000):
        data.append(list(arr))
    # NOTE(review): elsewhere in this file SparkDataFrame is constructed from a
    # native Spark DataFrame, not raw data — confirm this call still works, or
    # whether it is part of why this benchmark is disabled.
    df = SparkDataFrame(data, s)
    # Warm-up calls so lazy evaluation/caching does not skew the timings.
    res = df.as_array()
    res = df.as_array(type_safe=True)
    nts, ts = 0.0, 0.0
    for i in range(10):
        t = datetime.now()
        res = df.as_array()
        nts += (datetime.now() - t).total_seconds()
        t = datetime.now()
        res = df.as_array(type_safe=True)
        ts += (datetime.now() - t).total_seconds()
    print(nts, ts)


def _df(data, schema=None):
    """Build a SparkDataFrame from raw data, optionally coercing to *schema*."""
    session = SparkSession.builder.getOrCreate()
    if schema is None:
        # Let Spark infer the schema directly from the data.
        sdf = to_spark_df(session, data)
    else:
        # Route through PandasDataFrame so the data is coerced to the schema.
        pdf = PandasDataFrame(data, to_schema(schema))
        sdf = to_spark_df(session, pdf.native, schema)
    return SparkDataFrame(sdf, schema)


def _test_get_column_names(spark_session):
    """get_column_names returns the Spark column names in order (disabled)."""
    sdf = to_spark_df(spark_session, pd.DataFrame([[0, 1, 2]], columns=["0", "1", "2"]))
    assert ["0", "1", "2"] == get_column_names(sdf)


def _test_rename(spark_session):
    """rename returns a native Spark DataFrame with updated columns (disabled)."""

    def make(cols):
        # One-row frame with the given column names.
        return to_spark_df(spark_session, pd.DataFrame([[0, 1, 2]], columns=cols))

    # An empty mapping leaves the columns untouched.
    res = rename(make(["a", "b", "c"]), {})
    assert isinstance(res, ps.DataFrame)
    assert ["a", "b", "c"] == get_column_names(res)

    # A full mapping renames every column.
    res = rename(make(["0", "1", "2"]), {"0": "_0", "1": "_1", "2": "_2"})
    assert isinstance(res, ps.DataFrame)
    assert ["_0", "_1", "_2"] == get_column_names(res)
