from typing import Any, Dict, List
import argparse
import pandas as pd
import yaml
from pyspark.sql import SparkSession
from pyspark.sql.types import DoubleType
from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult
from great_expectations.core import (
    ExpectationConfiguration,
    ExpectationSuiteValidationResult,
    IDDict,
)
from great_expectations.core.batch import Batch, BatchDefinition
from great_expectations.data_context.data_context.base_data_context import (
    BaseDataContext,
)
from great_expectations.data_context.types.base import (
    CheckpointConfig,
    DataContextConfig,
    InMemoryStoreBackendDefaults,
)
from great_expectations.execution_engine import PandasExecutionEngine, SparkDFExecutionEngine
from great_expectations.util import filter_properties_dict
from great_expectations.validator.validator import Validator
import time
# Start the wall-clock timer for the whole run (reported at the end of the script).
start = time.perf_counter()
# Local single-core Spark session; GE's SparkDFExecutionEngine runs on top of it.
spark_builder = SparkSession.builder.master("local[1]").appName('ge_demo')
sparksession = spark_builder.getOrCreate()

# Command-line interface: the only argument is the path to the YAML job config.
parser = argparse.ArgumentParser(description="function parameters of argparse")
parser.add_argument('-cfp', '--config_file_path', default='')

args = parser.parse_args()

# Load the job configuration. safe_load is used instead of
# yaml.load(..., Loader=yaml.FullLoader): the config file is external input and
# safe_load cannot instantiate arbitrary Python objects. Plain YAML configs
# (mappings/lists/scalars) parse identically under both loaders.
# NOTE(review): default='' means a missing -cfp fails later with
# FileNotFoundError on open('') — consider required=True.
with open(args.config_file_path, mode='r', encoding='utf-8') as f:
    yamlConf = yaml.safe_load(f)

# Read the raw data with pandas according to the configured source type.
dataframe = None
if yamlConf['data_source'] == 'excel':
    dataframe = pd.read_excel(io=yamlConf['file_path'])
elif yamlConf['data_source'] == 'csv':
    dataframe = pd.read_csv(filepath_or_buffer=yamlConf['file_path'], encoding='utf-8')  # gb18030 for some source files
else:
    # Fail fast with a clear error. The original code only printed a message
    # and then crashed below on dataframe.astype(str) with an AttributeError.
    raise ValueError(f"Unsupported data_source: {yamlConf['data_source']!r} (expected 'excel' or 'csv')")

# Convert to a Spark DataFrame with every column as string, then re-cast the
# configured numeric columns to DoubleType under a '<name>_temp' alias.
df = sparksession.createDataFrame(data=dataframe.astype(str))
if yamlConf['need_cast_to_double_columns'] is not None:
    df = df.withColumns({column+'_temp': df[column].cast(DoubleType()) for column in yamlConf['need_cast_to_double_columns']})

# In-memory Great Expectations data context: all stores use
# InMemoryStoreBackendDefaults, so nothing is persisted to disk between runs.
data_context_config: DataContextConfig = DataContextConfig(
    datasources={  # type: ignore[arg-type]
        # NOTE(review): named "pandas_datasource" but backed by
        # SparkDFExecutionEngine — the name is misleading; batches fed to it
        # below are Spark DataFrames.
        "pandas_datasource": {
            "execution_engine": {
                "class_name": "SparkDFExecutionEngine",
                "module_name": "great_expectations.execution_engine",
            },
            "class_name": "Datasource",
            "module_name": "great_expectations.datasource",
            "data_connectors": {
                # RuntimeDataConnector accepts in-memory batch data passed at
                # run time (see batch_request further down).
                "runtime_data_connector": {
                    "class_name": "RuntimeDataConnector",
                    "batch_identifiers": [
                        "id_key_0",
                        "id_key_1",
                    ],
                }
            },
        },
    },
    expectations_store_name="expectations_store",
    validations_store_name="validations_store",
    evaluation_parameter_store_name="evaluation_parameter_store",
    checkpoint_store_name="checkpoint_store",
    store_backend_defaults=InMemoryStoreBackendDefaults(),
)
context = BaseDataContext(project_config=data_context_config)
# Wrap the Spark DataFrame in a GE Batch tied to the runtime connector above.
runtime_batch_definition = BatchDefinition(
    datasource_name="pandas_datasource",
    data_connector_name="runtime_data_connector",
    data_asset_name="my_asset",
    batch_identifiers=IDDict({}),
    batch_spec_passthrough=None,
)
spark_batch = Batch(data=df, batch_definition=runtime_batch_definition)

spark_engine = SparkDFExecutionEngine()
# NOTE(review): my_validator is not referenced later in this script; the
# checkpoint run below builds its own validation from batch_request.
my_validator: Validator = Validator(
    execution_engine=spark_engine,
    data_context=context,
    batches=[spark_batch],
)
# Create (or reset) the suite, then attach the single expectation described in
# the YAML config: 'function_name' is the expectation_type, 'kwargs' its args.
context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
expectation_config = ExpectationConfiguration(
    expectation_type=yamlConf['function_name'],
    kwargs=yamlConf['kwargs'],
)
test_suite.add_expectation(expectation_configuration=expectation_config)
# Persist the updated suite back into the (in-memory) expectations store.
test_suite.expectation_suite_name = "test_suite"
context.add_or_update_expectation_suite(
    expectation_suite=test_suite,
)
# Checkpoint definition: actions store the validation result / evaluation
# params and refresh data docs; result_format COMPLETE returns every
# unexpected row plus an index query keyed on the configured index column.
checkpoint_dict: dict = {
    "name": "my_checkpoint",
    "config_version": 1.0,
    "class_name": "Checkpoint",  # or SimpleCheckpoint
    "module_name": "great_expectations.checkpoint",
    "template_name": None,
    # NOTE(review): strftime %M is minute-of-hour; if "year-month" was
    # intended this should be %m — confirm desired run-name format.
    "run_name_template": "%Y-%M-datapath: " + yamlConf['run_name'],
    # Suite and batch are not baked in here; both are supplied at run time
    # via run_checkpoint(...) below.
    "expectation_suite_name": None,
    "batch_request": None,
    "profilers": [],
    "action_list": [
        {
            "name": "store_validation_result",
            "action": {"class_name": "StoreValidationResultAction"},
        },
        {
            "name": "store_evaluation_params",
            "action": {"class_name": "StoreEvaluationParametersAction"},
        },
        {
            "name": "update_data_docs",
            "action": {"class_name": "UpdateDataDocsAction"},
        },
    ],
    "validations": [],
    "runtime_configuration": {
        "result_format": {
            "result_format": "COMPLETE",
            "unexpected_index_column_names": [yamlConf['index_name']],
            "return_unexpected_index_query": True,
        },
    },
}
# Runtime batch request feeding the in-memory Spark DataFrame to the
# RuntimeDataConnector declared in the data context above.
batch_request = {
    "datasource_name": "pandas_datasource",
    "data_connector_name": "runtime_data_connector",
    "data_asset_name": "IN_MEMORY_DATA_ASSET",
    "runtime_parameters": {
        "batch_data": df,
    },
    "batch_identifiers": {
        "id_key_0": 1234567890,
    },
}
# Normalize the dict through CheckpointConfig, strip falsy/None entries, and
# register the checkpoint with the context.
checkpoint_config = CheckpointConfig(**checkpoint_dict)
context.add_or_update_checkpoint(
    **filter_properties_dict(
        properties=checkpoint_config.to_json_dict(),
        clean_falsy=True,
    ),
)
# NOTE(review): _save_project_config is a private DataContext method; with
# InMemoryStoreBackendDefaults it should be a no-op on disk — confirm it is
# actually needed here.
context._save_project_config()
# Execute the validation: suite and batch are injected at run time.
result: CheckpointResult = context.run_checkpoint(
    checkpoint_name="my_checkpoint",
    expectation_suite_name="test_suite",
    batch_request=batch_request,
)

print('============>结果写入结果文件<============')
# Hoist the shared output-path prefix instead of rebuilding it per file.
output_prefix = '/home/output_file/' + yamlConf['output_file_name_pre']
# NOTE(review): this writes the repr of the validation result, which is not
# guaranteed to be valid JSON despite the .json extension — confirm consumers.
with open(file=output_prefix + '_output.json', mode='w', encoding='utf-8') as f:
    print(result.list_validation_results()[0], file=f)
sparksession.stop()
# Report total wall-clock runtime (snake_case per PEP 8).
end = time.perf_counter()
run_time = end - start
with open(file=output_prefix + '_run_time_test.log', mode='w', encoding='utf-8') as f:
    print('运行时间(s): %s' % run_time, file=f)
    print('运行时间(ms): %s' % (run_time*1000), file=f)