import copy
import csv
import dataclasses
import datetime
import logging
import numpy
import pathlib
import peewee
import playhouse.sqlite_ext
from typing import Any, Dict, List, Set, Tuple, Type, Optional, Union, Callable
import vnpy.tools.utility
import vnpy.tools.database_cta
import vnpy.trader.utility
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.optimize import OptimizationSetting
from vnpy_ctabacktester.ui.widget import OptimizationSettingEditor


def saveByDict(allLine, filepath, mode='w', encoding='utf8', dialect='excel', kwds=None, header1=None) -> None:
    '''
    Save a list of dict rows to a CSV file via csv.DictWriter.

    allLine : list/tuple of dict rows (all rows should share the same keys);
              an empty list is a no-op (previously it truncated the file and crashed).
    filepath: target file (str or pathlib.Path); parent directories are created.
    mode    : open() mode, 'w' by default.
    encoding: file encoding, 'utf8' by default.
    dialect : csv dialect name forwarded to DictWriter.
    kwds    : extra keyword arguments forwarded to csv.DictWriter (None means {}).
    header1 : optional list of field names to place first; names not present in
              the data are dropped, the remaining fields follow in sorted order.

    Python CSV Reader/Writer examples:
    https://blog.csdn.net/u011284860/article/details/51031051
    '''
    assert isinstance(allLine, (list, tuple))
    if 0 < len(allLine):
        assert isinstance(allLine[0], dict)

    # BUGFIX: an empty allLine passed the assertions above but then crashed on
    # allLine[0] below — after the file had already been truncated by mode='w'.
    if not allLine:
        return

    # BUGFIX: the default used to be a mutable kwds={} shared across calls.
    if kwds is None:
        kwds = {}

    if not isinstance(filepath, pathlib.Path):
        filepath = pathlib.Path(filepath)

    filepath.parent.mkdir(parents=True, exist_ok=True)

    with open(file=filepath, mode=mode, encoding=encoding, newline='') as f:
        # Header field names, sorted for deterministic output.
        headers = sorted(allLine[0].keys())
        # Keep only the header1 entries that actually exist in the data.
        header1: List[str] = [_ for _ in header1 if _ in headers] if header1 else []
        # Drop from headers the fields already listed in header1.
        headers: List[str] = [_ for _ in headers if _ not in header1]
        # header1 fields come first, the rest follow.
        headers: List[str] = header1 + headers
        #
        csvWriter = csv.DictWriter(f, fieldnames=headers, dialect=dialect, **kwds)
        csvWriter.writeheader()
        csvWriter.writerows(allLine)


@dataclasses.dataclass(frozen=True, eq=True, order=True)
class CtaBacktestDataKey:
    """
    Immutable, hashable, orderable key identifying one backtest run:
    strategy + instrument + interval + time range + the (sorted) strategy
    setting, flattened into a tuple so the whole key can live in a dict.
    """
    strategy_name: str = None
    exchange: Exchange = None
    symbol: str = None
    interval: Interval = None
    start: datetime.datetime = None
    end: datetime.datetime = None
    setting: Tuple[tuple] = None

    @classmethod
    def from_line(cls, line: vnpy.tools.database_cta.CtaStatisticsData, setting: dict = None):
        """Build a key from a statistics line; `setting` overrides line.setting when given."""
        chosen: dict = line.setting if setting is None else setting
        return CtaBacktestDataKey(
            strategy_name=line.strategy_name,
            exchange=line.exchange,
            symbol=line.symbol,
            interval=line.interval,
            start=line.start,
            end=line.end,
            setting=tuple(sorted(chosen.items())),
        )


def main_function_bak(cta_setting: "CtaSetting"):
    """
    Legacy entry point: load all statistics rows for the configured run from
    the CTA database and hand a shallow copy of them to calculate().
    """
    database: vnpy.tools.database_cta.CtaBacktestDatabase = vnpy.tools.database_cta.get_database()
    logging.info(f"path={database.db.database}")

    loaded: List[vnpy.tools.database_cta.CtaStatisticsData] = database.load_cta_statistics_data(
        strategy_name=cta_setting.strategy_name,
        interval=cta_setting.interval,
        exchange=cta_setting.exchange,
        symbol=cta_setting.symbol,
        start=cta_setting.start,
        end=cta_setting.end,
    )
    logging.info(f"load_cta_statistics_data, len_lines={len(loaded)}")

    # calculate() rebinds its `lines` argument, but pass a shallow copy anyway
    # so the caller's list object is demonstrably untouched.
    calculate(
        lines=copy.copy(loaded),
        filter_cond=cta_setting.filter_cond,
        target_name=cta_setting.target_name,
        percentiles=cta_setting.percentiles,
        edge_length=cta_setting.edge_length,
        data_number=cta_setting.data_number,
    )


def calculate(
    lines: List[vnpy.tools.database_cta.CtaStatisticsData],
    filter_cond: Optional[dict],
    target_name: str,
    percentiles: float,
    edge_length: int,
    data_number: int,
):
    """
    In-memory variant of the selection pass (used by main_function_bak).

    lines      : statistics rows loaded from the database,
    filter_cond: optional {setting key: value} filter; only matching rows are kept,
    target_name: optimization target, a key of line.result,
    percentiles: which percentile of the positive results defines the boundary value,
    edge_length: half-width of the parameter neighbourhood inspected around each point,
    data_number: stop after this many points have been selected.

    Returns the list of selected lines (previously the result was only logged;
    existing callers that ignore the return value are unaffected).
    """
    assert target_name in OptimizationSettingEditor.DISPLAY_NAME_MAP.values()

    # Keep only the rows matching the filter condition.
    if filter_cond is not None:
        for key, val in filter_cond.items():
            lines = [line for line in lines if line.setting[key] == val]

    logging.info(f"filter_cond[{'V' if filter_cond else 'X'}], len_lines={len(lines)}")

    # Rows with a positive target value, best first.
    lineP: List[vnpy.tools.database_cta.CtaStatisticsData] = sorted(
        (line for line in lines if line.result[target_name] > 0),
        key=lambda x: x.result[target_name],
        reverse=True,
    )
    if not lineP:
        # Previously this surfaced as a bare IndexError below.
        raise ValueError(f"no line with a positive {target_name}; cannot compute a boundary value")

    # Boundary value taken at the requested percentile of the positive results.
    # BUGFIX: the old index int(len(lineP) * (1 - percentiles)) equals len(lineP)
    # when percentiles == 0.0 and raised IndexError; clamp to the last element.
    boundary_index = min(int(len(lineP) * (1 - percentiles)), len(lineP) - 1)
    boundary_value = lineP[boundary_index].result[target_name]
    assert 0 < boundary_value

    logging.info(f"boundary_value={boundary_value}, len_lineP={len(lineP)}")

    # Map each row's identity-plus-setting key to the row for O(1) lookups.
    mapping: Dict[CtaBacktestDataKey, vnpy.tools.database_cta.CtaStatisticsData] = {}
    for line in lines:
        mapping[CtaBacktestDataKey.from_line(line=line, setting=None)] = line

    logging.info(f"len_mapping={len(mapping)}")

    # Sort all rows, best target value first (id breaks ties).
    lines = sorted(lines, key=lambda x: (x.result[target_name], x.id), reverse=True)

    logging.info(f"sorted, len_lines={len(lines)}")

    # Selection pass.
    destination: List[vnpy.tools.database_cta.CtaStatisticsData] = []

    for idx, line in enumerate(lines):
        # Generate every parameter value inside the "edge" around this point.
        params = {}
        for key, val in line.setting.items():
            if filter_cond is not None and key in filter_cond:
                # Filtered parameters stay pinned to their filter value.
                params[key] = [filter_cond[key], ]
                continue
            params[key] = [*range(val - edge_length, val + edge_length + 1)]

        # Expand into the full list of parameter combinations.
        optimization_setting = OptimizationSetting()
        optimization_setting.params = params
        setting_list: List[dict] = optimization_setting.generate_settings()

        # Look every combination up within the "edge" neighbourhood.
        data_list: List[vnpy.tools.database_cta.CtaStatisticsData] = []
        for setting in setting_list:
            data = mapping.get(CtaBacktestDataKey.from_line(line=line, setting=setting), None)
            if data is None:
                continue
            data_list.append(data)

        # Missing neighbours: the point probably sits too close to the grid
        # boundary — discard it.
        if len(data_list) != len(setting_list):
            continue

        # Any neighbour at or below the boundary value means the neighbourhood
        # quality is low, so the centre point is disqualified too.
        low_list: list = [data for data in data_list if data.result[target_name] <= boundary_value]
        if len(low_list) > 0:
            continue

        destination.append(line)

        message: dict = {
            "idx": idx,
            "idx_dst": len(destination),
            "setting": line.setting,
            "result": line.result,
            "filter_cond": filter_cond,
            "target_name": target_name,
            "percentiles": percentiles,
            "edge_length": edge_length,
        }
        logging.info(f"message={message}")

        if data_number <= len(destination):
            break

    logging.info(f"target_name={target_name}, len_destination={len(destination)}")

    return destination


@dataclasses.dataclass(frozen=True, eq=True, order=True)
class CtaSetting:
    """
    Run configuration for the boundary/edge analysis.

    strategy_name: DoubleMaStrategy/TurtleSignalStrategy
    target_name  : total_return/sharpe_ratio/return_drawdown_ratio/daily_net_pnl
    """
    strategy_name: Optional[str] = None
    exchange: Optional[Exchange] = None
    symbol: Optional[str] = None
    interval: Optional[Interval] = None
    start: Optional[datetime.datetime] = None
    end: Optional[datetime.datetime] = None
    filter_cond: Optional[dict] = None  # filter condition: only keep rows whose setting matches these key/value pairs
    target_name: Optional[str] = None  # optimization target, a key of line.result
    percentiles: float = 0.50  # percentile defining the boundary value (0.50 == median)
    edge_length: int = 2  # edge length: half-width of the neighbourhood inspected around each point
    data_number: int = 1  # how many result rows to select at most


def main_function(cta_setting: CtaSetting, sqlite_filename: Optional[str] = None):
    """
    Query the backtest statistics for one run from SQLite, keep the parameter
    points whose whole "edge" neighbourhood scores above a boundary value, and
    dump the selected rows (with per-edge-length averages) to "<this file>.csv".

    cta_setting    : run configuration (strategy, symbol, time range, target, edge).
    sqlite_filename: optional database file name (resolved via
                     vnpy.trader.utility.get_file_path); falsy values keep the
                     currently bound database file.
    """
    if sqlite_filename:
        # Peewee: how to dynamically re-point the SQLite database file,
        # https://geek-docs.com/peewee/peewee-questions/12_peewee_how_can_i_dynamically_set_the_sqlite_database_file_in_peewee.html
        path: str = str(vnpy.trader.utility.get_file_path(sqlite_filename))
        db = vnpy.tools.database_cta.DbCtaStatisticsData._meta.database
        assert isinstance(db, playhouse.sqlite_ext.SqliteExtDatabase)
        db.init(database=path)

    logging.info(f"database={vnpy.tools.database_cta.DbCtaStatisticsData._meta.database.database}")

    assert cta_setting.target_name in OptimizationSettingEditor.DISPLAY_NAME_MAP.values()

    # Total row count for this run, before the result/setting conditions apply.
    select_count: int = (
        vnpy.tools.database_cta.DbCtaStatisticsData.select()
        .where(
            True
            & (vnpy.tools.database_cta.DbCtaStatisticsData.strategy_name == cta_setting.strategy_name)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.exchange == cta_setting.exchange.value)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.symbol == cta_setting.symbol)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.interval == cta_setting.interval.value)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.start == cta_setting.start)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.end == cta_setting.end)
        ).count()
    )

    logging.info(f"select_count={select_count}")

    # Filter on the DbCtaStatisticsData.setting JSONField,
    wwwww = True
    if cta_setting.filter_cond:
        for kkk, vvv in cta_setting.filter_cond.items():
            wwwww = wwwww & (vnpy.tools.database_cta.DbCtaStatisticsData.setting[kkk] == vvv)

    # Rows with a positive target value, best first (id breaks ties).
    model_select: peewee.ModelSelect = (
        vnpy.tools.database_cta.DbCtaStatisticsData.select()
        .where(
            True
            & (vnpy.tools.database_cta.DbCtaStatisticsData.strategy_name == cta_setting.strategy_name)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.exchange == cta_setting.exchange.value)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.symbol == cta_setting.symbol)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.interval == cta_setting.interval.value)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.start == cta_setting.start)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.end == cta_setting.end)
            & (vnpy.tools.database_cta.DbCtaStatisticsData.result[cta_setting.target_name] > 0)
            & wwwww  # vnpy.tools.database_cta.DbCtaStatisticsData.setting
        ).order_by(vnpy.tools.database_cta.DbCtaStatisticsData.result[cta_setting.target_name].desc(), vnpy.tools.database_cta.DbCtaStatisticsData.id)
    )

    logging.info(f"model_select={type(model_select)}")

    # Materialize the rows and index them by their setting tuple for O(1) lookup.
    lines: List[vnpy.tools.database_cta.DbCtaStatisticsData] = []
    mapping: Dict[tuple, vnpy.tools.database_cta.DbCtaStatisticsData] = {}

    for db_line in model_select:
        db_line: vnpy.tools.database_cta.DbCtaStatisticsData = db_line
        lines.append(db_line)
        map_key: tuple = vnpy.tools.utility.dict2tuple(db_line.setting)
        mapping[map_key] = db_line

    # Equal sizes imply every row has a distinct setting within this run.
    assert len(lines) == len(mapping)

    logging.info(f"len_lines={len(lines)}, len_mapping={len(mapping)},")

    # Compute the boundary value,
    percentile = lines[int((len(lines) - 1) * (1 - cta_setting.percentiles) * 1)].result[cta_setting.target_name]  # percentile value,
    # 75th percentile: [75% of the data < the 75th-percentile value < 25% of the data]; it exceeds 75% of the data and only 25% is larger,
    average = sum([_.result[cta_setting.target_name] for _ in lines]) / len(lines)  # average value,
    assert 0 < percentile and 0 < average
    boundary_value = max(percentile, average)  # the larger of the two is used as the boundary value,

    logging.info(f"boundary_value={boundary_value:.4f}, average={average:.4f}, percentile={percentile:.4f}")

    # Per-parameter grid step: the smallest gap between observed values.
    # String-valued parameters must be constant across all rows and are kept as-is.
    step_s: dict = {}
    if lines:
        for setting_key in lines[0].setting.keys():
            if isinstance(lines[0].setting[setting_key], str):
                assert len(set(_.setting[setting_key] for _ in lines)) == 1
                step_s[setting_key] = lines[0].setting[setting_key]
            else:
                step_s[setting_key] = round(min(numpy.diff(sorted(set(_.setting[setting_key] for _ in lines)))), ndigits=9)
        # Convert numpy scalars back to plain Python numbers.
        for k, v in step_s.items():
            if isinstance(v, str):
                pass
            elif isinstance(v, numpy.integer):
                step_s[k] = int(v)
            elif isinstance(v, numpy.floating):
                step_s[k] = float(v)
            else:
                raise RuntimeError

    # Start the selection pass,
    destination: List[vnpy.tools.database_cta.CtaStatisticsData] = []

    dest_list: List[Dict[str, Any]] = []

    for idx_src, db_line in enumerate(lines):
        db_line: vnpy.tools.database_cta.DbCtaStatisticsData = db_line

        # Full neighbourhood at the configured edge length, filtered against
        # the boundary value; an empty list means "reject this point".
        item_list: List[vnpy.tools.database_cta.DbCtaStatisticsData] = xxxxx(
            mapping=mapping,
            db_line=db_line,
            filter_cond=cta_setting.filter_cond,
            edge_length=cta_setting.edge_length,
            target_name=cta_setting.target_name,
            boundary_value=boundary_value,
            step_s=step_s,
        )

        if not item_list:
            continue

        py_line: vnpy.tools.database_cta.CtaStatisticsData = vnpy.tools.database_cta.CtaStatisticsData.db2py(db_line=db_line)
        destination.append(py_line)

        py_dict: Dict[str, Any] = dataclasses.asdict(obj=py_line)

        average_mapping: Dict[str, float] = {}

        average: float = calculate_average(item_list=item_list, target_name=cta_setting.target_name)
        average_mapping[f"edge_{cta_setting.edge_length}"] = round(average, ndigits=4)

        # Averages for every edge length down to 0 (no boundary filtering here);
        # edge 0 is the point itself.
        for edge_len in range(cta_setting.edge_length, -1, -1):
            item_list: List[vnpy.tools.database_cta.DbCtaStatisticsData] = xxxxx(
                mapping=mapping,
                db_line=db_line,
                filter_cond=cta_setting.filter_cond,
                edge_length=edge_len,
                target_name=cta_setting.target_name,
                boundary_value=None,
                step_s=step_s,
            )
            average: float = calculate_average(item_list=item_list, target_name=cta_setting.target_name)
            average_mapping[f"edge_{edge_len}"] = round(average, ndigits=4)

            # At edge 0 the neighbourhood is exactly the point itself.
            assert (edge_len != 0) or (average == py_line.result[cta_setting.target_name])

        # Flatten everything into one CSV row.
        py_dict.update(average_mapping)
        py_dict.update({f"setting_{k}": v for k, v in py_line.setting.items()})
        py_dict.update({f"result_{k}": v for k, v in py_line.result.items()})
        py_dict["idx_src"] = idx_src
        py_dict["idx_dst"] = len(destination)
        py_dict["filter_cond"] = cta_setting.filter_cond
        py_dict["target_name"] = cta_setting.target_name
        py_dict["percentiles"] = cta_setting.percentiles
        py_dict["edge_length"] = cta_setting.edge_length

        dest_list.append(py_dict)

        if cta_setting.data_number <= len(destination):
            break

    logging.info(f"target_name={cta_setting.target_name}, len_destination={len(destination)}")

    # Preferred column order for the CSV; names missing from the data are
    # dropped by saveByDict, the remaining columns follow in sorted order.
    header1: List[str] = [
        "symbol",
        "strategy_name",
        "edge_0",
        "edge_1",
        "edge_2",
        "edge_3",
        "edge_4",
        "idx_src",
        "idx_dst",
        "setting_line_window",
        "setting_fast_window",
        "setting_slow_window",
        "result_total_return",
        "result_sharpe_ratio",
        "result_return_drawdown_ratio",
        "result_daily_net_pnl",
        "start",
        "end",
    ]

    saveByDict(allLine=dest_list, filepath=f"{__file__}.csv", header1=header1)


def xxxxx(
    mapping: Dict[tuple, vnpy.tools.database_cta.DbCtaStatisticsData],
    db_line: vnpy.tools.database_cta.DbCtaStatisticsData,
    filter_cond: Optional[dict],
    edge_length: int,
    target_name: str,
    boundary_value: Optional[float],
    step_s: dict,
) -> List[vnpy.tools.database_cta.DbCtaStatisticsData]:
    """
    Collect the backtest rows for every parameter combination inside the
    "edge" neighbourhood of db_line.setting.

    Returns the complete neighbourhood as a list, or an empty list when any
    neighbour is missing from `mapping` or — when boundary_value is given —
    scores at or below the boundary.
    """
    # Candidate values of every parameter inside the edge.
    params: dict = {}
    for name, center in db_line.setting.items():
        if (filter_cond is not None) and (name in filter_cond):
            # Filtered parameters stay pinned to their filter value.
            params[name] = [filter_cond[name], ]
        elif isinstance(center, str):
            # String parameters have no numeric neighbourhood.
            params[name] = [center]
        else:
            # e.g. line_window=5 with edge_length=2 and step 1 spans [3,4,5,6,7].
            step = step_s[name]
            params[name] = [*vnpy.tools.utility.range2(
                round(center - edge_length * step, ndigits=9),
                round(center + edge_length * step, ndigits=9),
                round(step, ndigits=9),
                stop_is_last=True,
            )]

    # Expand into the full list of parameter combinations.
    optimization_setting = OptimizationSetting()
    optimization_setting.params = params
    setting_list: List[dict] = optimization_setting.generate_settings()

    # Look every combination up in the preloaded mapping.
    item_list: List[vnpy.tools.database_cta.DbCtaStatisticsData] = []
    for one_setting in setting_list:
        candidate = mapping.get(vnpy.tools.utility.dict2tuple(one_setting), None)
        if candidate is None:
            # Missing: this setting either left the parameter grid (e.g.
            # line_window==-1) or its result never made it into the query —
            # no point looking any further.
            break
        if (boundary_value is not None) and (candidate.result[target_name] <= boundary_value):
            # A neighbour at or below the boundary disqualifies the centre point.
            break
        item_list.append(candidate)

    # An incomplete neighbourhood means db_line.setting is discarded.
    if len(item_list) != len(setting_list):
        item_list.clear()

    return item_list


def calculate_average(item_list: List["vnpy.tools.database_cta.DbCtaStatisticsData"], target_name: str) -> float:
    """
    Average of result[target_name] over the centre point and its neighbours.

    item_list  : rows returned by xxxxx(); each exposes a .result dict.
    target_name: key of the result dict to average.

    Raises ValueError on an empty item_list (previously this surfaced as an
    accidental ZeroDivisionError).
    """
    if not item_list:
        raise ValueError("item_list must not be empty")
    # Generator instead of an intermediate list; same arithmetic as before.
    return sum(item.result[target_name] for item in item_list) / len(item_list)


def initialize_logging():
    """Send INFO-level log records to both the console and '<this file>.log' (append mode)."""
    handlers: List[logging.Handler] = [
        logging.StreamHandler(),
        logging.FileHandler(filename=f"{__file__}.log", mode="a", encoding="utf8"),
    ]
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s", handlers=handlers)


if __name__ == "__main__":
    initialize_logging()

    # NOTE(review): Interval.MINUTE05 looks like a fork-specific extension of
    # vnpy's Interval enum — confirm it exists in this environment.
    cta_setting: CtaSetting = CtaSetting(
        strategy_name="TrendlinesWithBreaksStrategy",
        exchange=Exchange.SHFE,
        symbol="agL8",
        interval=Interval.MINUTE05,
        start=datetime.datetime(year=2023, month=12, day=29, hour=19, minute=0, second=0),
        end=datetime.datetime(year=2025, month=1, day=8, hour=17, minute=0, second=0),
        filter_cond=None,
        target_name="total_return",
        percentiles=0.5,
        edge_length=4,
        data_number=999,
    )

    # Row counts per run already stored in the database (run manually):
    # SELECT strategy_name,exchange,symbol,interval,start,end,count(*) FROM dbctabacktestdata GROUP BY strategy_name,exchange,symbol,interval,start,end;
    # An empty sqlite_filename keeps the database file currently bound by vnpy.tools.database_cta.
    main_function(cta_setting=cta_setting, sqlite_filename="")
    # Follow-up aggregation over the exported CSV (run manually):
    # SELECT
    # setting_line_window,setting_fast_window,setting_slow_window,COUNT(*),SUM(result_total_return)
    # FROM `guess#py$` WHERE 1=1
    # AND edge_0 >= edge_1
    # AND edge_1 >= edge_2
    # AND edge_2 >= edge_3
    # AND edge_3 >= edge_4
    # GROUP BY setting_line_window,setting_fast_window,setting_slow_window
