from typing import Any, Dict, List

import pandas as pd
from pyspark.sql import SparkSession
from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult
from great_expectations.core import (
    ExpectationConfiguration,
    ExpectationSuiteValidationResult,
    IDDict,
)
from great_expectations.core.batch import Batch, BatchDefinition
from great_expectations.data_context.data_context.base_data_context import (
    BaseDataContext,
)
from great_expectations.data_context.types.base import (
    CheckpointConfig,
    DataContextConfig,
    InMemoryStoreBackendDefaults,
)
from great_expectations.execution_engine import PandasExecutionEngine, SparkDFExecutionEngine
from great_expectations.util import filter_properties_dict
from great_expectations.validator.validator import Validator
import time

# Sample records for the in-memory Spark DataFrame; several cells are
# deliberately None so the null-check expectation below has rows to flag.
data = [
    {"id": 0, "a": 1, "b": 2, "c": 3},
    {"id": 1, "a": 4, "b": 5, "c": 6},
    {"id": 2, "a": None, "b": 8, "c": 9},
    {"id": 3, "a": 7, "b": 6, "c": 99},
    {"id": 4, "a": 4, "b": None, "c": None},
    {"id": 5, "a": 12, "b": None, "c": 7},
    {"id": 6, "a": 5, "b": 4, "c": 0},
]

# A single-threaded local Spark session is sufficient for this demo.
sparksession = (
    SparkSession.builder
    .master("local[1]")
    .appName('ge_demo')
    .getOrCreate()
)

df = sparksession.createDataFrame(data=data)

# NOTE: The following code is only for testing and can be ignored by users.
# In-memory Great Expectations project configuration.  Despite the key name
# "pandas_datasource", this datasource is wired to the Spark execution engine.
_spark_datasource: Dict[str, Any] = {
    "execution_engine": {
        "class_name": "SparkDFExecutionEngine",
        "module_name": "great_expectations.execution_engine",
    },
    "class_name": "Datasource",
    "module_name": "great_expectations.datasource",
    "data_connectors": {
        "runtime_data_connector": {
            "class_name": "RuntimeDataConnector",
            "batch_identifiers": [
                "id_key_0",
                "id_key_1",
            ],
        }
    },
}
data_context_config: DataContextConfig = DataContextConfig(
    datasources={"pandas_datasource": _spark_datasource},  # type: ignore[arg-type]
    expectations_store_name="expectations_store",
    validations_store_name="validations_store",
    evaluation_parameter_store_name="evaluation_parameter_store",
    checkpoint_store_name="checkpoint_store",
    store_backend_defaults=InMemoryStoreBackendDefaults(),
)
# Build the in-memory data context and wrap the Spark DataFrame in a Batch.
context = BaseDataContext(project_config=data_context_config)

batch_definition = BatchDefinition(
    datasource_name="pandas_datasource",
    data_connector_name="runtime_data_connector",
    data_asset_name="my_asset",
    batch_identifiers=IDDict({}),
    batch_spec_passthrough=None,
)
batch = Batch(data=df, batch_definition=batch_definition)

engine = SparkDFExecutionEngine()
# NOTE(review): this Validator is never used below — the checkpoint run
# re-supplies the data via a runtime batch request.  Kept to preserve the
# original script's behavior; confirm whether it can be removed.
my_validator: Validator = Validator(
    execution_engine=engine,
    data_context=context,
    batches=[batch],
)

# Expect all column values to be in a given set
# context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
# test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_values_to_be_in_set",
#     kwargs={
#         "column": "a",
#         "value_set": [999001,999002],
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect column values to match a list of regular expressions
# context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
# test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_values_to_match_regex_list",
#     kwargs={
#         "column": "a",
#         "regex_list": ["\d+?"],
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect each record's values to be unique across the listed columns
# context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
# test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_select_column_values_to_be_unique_within_record",
#     kwargs={
#         "column_list": ["a", "b"],
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect (column_A, column_B) value pairs to be in the given pair set
# context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
# test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_pair_values_to_be_in_set",
#     kwargs={
#         "column_A": "a",
#         "column_B": "b",
#         "value_pairs_set": [(1, 2)]
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect the sum across the listed columns to equal the given total for every row
# context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
# test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_multicolumn_sum_to_equal",
#     kwargs={
#         "column_list": ["a", "b"],
#         "sum_total": 3
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect the combination of the listed columns to be unique (compound uniqueness)
# context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
# test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_compound_columns_to_be_unique",
#     kwargs={
#         "column_list": ["a", "b"],
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Create (or replace) the expectation suite that the checkpoint will run.
context.add_or_update_expectation_suite(expectation_suite_name="test_suite")
test_suite = context.get_expectation_suite(expectation_suite_name="test_suite")

# Expect column "b" to contain no nulls.  Two rows of the sample data have
# b=None, so the validation result will report unexpected rows.
not_null_config = ExpectationConfiguration(
    expectation_type="expect_column_values_to_not_be_null",
    kwargs={"column": "b"},
)
test_suite.add_expectation(expectation_configuration=not_null_config)

# Null-value check on column "a"
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_values_to_not_be_null",
#     kwargs={
#         "column": "a",
#     },
# )

# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect table columns to match the specified ordered list
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_table_columns_to_match_ordered_list",
#     kwargs={
#         "column_list": ['id', 'code', 'name', 'spec_id', 'region_id', 'address_id', 'address_name', 'coordinate_x', 'coordinate_y', 'area_type_id', 'property_type_id', 'parent_id', 'is_valid'],
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect the table row count to equal the specified value
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_table_row_count_to_equal",
#     kwargs={
#         "value": 9999,
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect table columns to match the specified set (order-insensitive)
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_table_columns_to_match_set",
#     kwargs={
#         "column_set": ['id', 'a', 'b', 'c'],
#         "exact_match": True
#     },
# )
# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect the specified column to exist
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_to_exist",
#     kwargs={
#         "column": "ar_type_id",
#     },
# )

# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect column value lengths to be within the given range
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_value_lengths_to_be_between",
#     kwargs={
#         "column": "code",
#         "min_value": 2,
#         "max_value": 5
#     },
# )

# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Expect column values to be parseable as EDTF (Extended Date/Time Format) strings
# expectation_config = ExpectationConfiguration(
#     expectation_type="expect_column_values_to_be_edtf_parseable",
#     kwargs={
#         "column": "pickup_datetime",
#     },
# )

# test_suite.add_expectation(expectation_configuration=expectation_config)
#

# Persist the populated suite back into the context under its final name.
test_suite.expectation_suite_name = "test_suite"
context.add_or_update_expectation_suite(expectation_suite=test_suite)
# Placeholder path folded into the run-name template below.
file_name = "/home/songyunlong/..."

# Standard post-validation actions: store the validation result, store the
# evaluation parameters, and refresh the data docs.
_actions = [
    {"name": name, "action": {"class_name": cls}}
    for name, cls in (
        ("store_validation_result", "StoreValidationResultAction"),
        ("store_evaluation_params", "StoreEvaluationParametersAction"),
        ("update_data_docs", "UpdateDataDocsAction"),
    )
]

# NOTE(review): "%Y-%M" expands to year-minute; "%m" (month) may have been
# intended — confirm before relying on run names for dating.
checkpoint_dict: dict = {
    "name": "my_checkpoint",
    "config_version": 1.0,
    "class_name": "Checkpoint",  # or SimpleCheckpoint
    "module_name": "great_expectations.checkpoint",
    "template_name": None,
    "run_name_template": "%Y-%M-" + file_name,
    "expectation_suite_name": None,
    "batch_request": None,
    "profilers": [],
    "action_list": _actions,
    "validations": [],
    # COMPLETE result format plus the "id" index column lets the report point
    # back at the exact offending rows.
    "runtime_configuration": {
        "result_format": {
            "result_format": "COMPLETE",
            "unexpected_index_column_names": ["id"],
            "return_unexpected_index_query": True,
        },
    },
}
# Runtime batch request that injects the in-memory Spark DataFrame as the
# data asset the checkpoint validates.
batch_request = dict(
    datasource_name="pandas_datasource",
    data_connector_name="runtime_data_connector",
    data_asset_name="IN_MEMORY_DATA_ASSET",
    runtime_parameters={"batch_data": df},
    batch_identifiers={"id_key_0": 1234567890},
)
# Register the checkpoint (falsy entries are stripped so config defaults
# apply) and run it against the suite built above.
checkpoint_config = CheckpointConfig(**checkpoint_dict)
_cleaned_checkpoint_kwargs = filter_properties_dict(
    properties=checkpoint_config.to_json_dict(),
    clean_falsy=True,
)
context.add_or_update_checkpoint(**_cleaned_checkpoint_kwargs)
# NOTE(review): _save_project_config is a private API; with in-memory store
# defaults it should persist nothing durable — confirm it is still needed.
context._save_project_config()
result: CheckpointResult = context.run_checkpoint(
    checkpoint_name="my_checkpoint",
    expectation_suite_name="test_suite",
    batch_request=batch_request,
)

# output_file_name = 'expect_column_values_to_not_be_null_'
output_file_name = ''
print('============>结果写入output.txt<============')
# Write the first validation result plus a run timestamp to ./output.txt.
# Encoding is pinned to UTF-8: the validation payload may contain non-ASCII
# text, and the platform default encoding is not guaranteed to handle it.
with open(file=f'./{output_file_name}output.txt', mode='w', encoding='utf-8') as f:
    print(result.list_validation_results()[0], file=f)
    print("\"run_time\": " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=f)
sparksession.stop()