# -*- coding: utf-8 -*-
from datetime import datetime
import hashlib
import json
from os.path import expanduser

from collections import defaultdict
import random
import time
from typing import Iterable

from anyio import sleep
import numpy as np
import pandas as pd
import wqb
import asyncio

# Epoch seconds of the last successful (re)authentication; 0 forces a
# refresh on the first on_start call. NOTE(review): on_start updates
# `last_login_time` instead of this variable — confirm which is intended.
last_auth_time = 0

def on_start(vars: dict[str, object]) -> None:
    """Lifecycle hook: refresh the wqb session at most once every 4 hours.

    Relies on the module-level ``wqbs`` session object (assumed to be set up
    elsewhere in the project — not visible in this file) and on the
    module-level ``last_auth_time`` timestamp.

    :param vars: hook context; only ``vars['resp']`` is read (printed).
    """
    global wqbs
    global last_auth_time

    current_time = time.time()
    # Re-authenticate only when more than 4 hours have passed.
    if current_time - last_auth_time > 4 * 3600:
        try:
            wqbs.delete_authentication()
            wqbs.post_authentication()
            wqbs.logger.info("定时刷新 session 成功")
            # BUG FIX: the original assigned ``last_login_time`` here while
            # the guard above reads ``last_auth_time``, so the session was
            # refreshed on every single call. Update the variable that is
            # actually checked.
            last_auth_time = current_time
        except Exception as e:
            wqbs.logger.error(f"定时刷新 session 失败: {e}")

    print(vars['resp'])

def filter_alphas(
    wqbs: wqb.WQBSession,
    status: str = 'UNSUBMITTED',
    region: str = None,
    delay: int = None,
    universe: str = None,
    sharpeFilterRange: wqb.FilterRange = None,
    fitnessFilterRange: wqb.FilterRange = None,
    dateCreatedFilterRange: wqb.FilterRange = None,
    turnoverFilterRange: wqb.FilterRange = None,
    order: str = 'is.sharpe',
    others: Iterable[str] = None,
    limit: int = 2000,
    log_name: str = None,
) -> list:
    """Fetch alphas matching the given filters, paging 100 at a time.

    Paging stops when ``limit`` records have been collected, when a page
    comes back short, or when the server returns no data.

    :param wqbs: authenticated session used for the API calls
    :param limit: soft cap on how many records to collect (the last page is
                  not trimmed, so slightly more may be returned — kept for
                  backward compatibility)
    :param log_name: label forwarded to the session logger; defaults to a
                     module-qualified name
    :return: list of alpha record dicts
    """
    if log_name is None:
        # BUG FIX: the original default referenced ``self`` inside this
        # module-level function, raising NameError whenever log_name was
        # omitted by the caller.
        log_name = f"utils#{filter_alphas.__name__}"

    results = []  # renamed from ``list`` to stop shadowing the builtin
    page_size = 100
    offset = 0
    while True:
        resp = wqbs.filter_alphas_limited(
            status=status,
            region=region,
            delay=delay,
            universe=universe,
            sharpe=sharpeFilterRange,
            fitness=fitnessFilterRange,
            turnover=turnoverFilterRange,
            date_created=dateCreatedFilterRange,
            order=order,
            others=others,
            limit=page_size,
            offset=offset,
            log=log_name
        )

        data = resp.json()
        if not data:
            print("没有符合条件的数据...")
            break
        if offset == 0:
            # First page carries the total count of matching records.
            print(f"本次查询条件下共有{data['count']}条数据...")
        results.extend(data['results'])
        if len(results) >= limit:
            break
        # A short page means the server has no more records.
        if len(data['results']) < page_size:
            break
        offset += page_size
    return results

def submitable_alphas(wqbs: wqb.WQBSession
        , start_time: str
        , end_time: str
        , limit: int = 50
        , sharpe: float = 1.58
        , fitness: float = 1.0
        , order: str = 'dateCreated'
        , others: Iterable[str] = None) -> list:
    """Return unsubmitted alphas created in [start_time, end_time] that pass
    the sharpe/fitness thresholds and are not in the known-failed id list.

    :param start_time: inclusive lower bound for dateCreated (ISO string)
    :param end_time: inclusive upper bound for dateCreated (ISO string)
    :param sharpe: minimum in-sample sharpe (inclusive)
    :param fitness: minimum in-sample fitness (inclusive)
    :return: list of alpha record dicts (possibly empty)
    """
    alphas = filter_alphas(
        wqbs,
        status='UNSUBMITTED',
        sharpeFilterRange=wqb.FilterRange.from_str(f'[{sharpe}, inf)'),
        fitnessFilterRange=wqb.FilterRange.from_str(f'[{fitness}, inf)'),
        dateCreatedFilterRange=wqb.FilterRange.from_str(f'[{start_time}, {end_time}]'),
        order=order,
        others=others,
        limit=limit,
        log_name="utils#submitable_alphas"
    )
    if not alphas:
        print('没有可提交的 Alpha...')
        return alphas
    failed_ids = set()
    try:
        with open('./results/check_fail_ids.csv', 'r') as f:
            failed_ids = set(line.strip() for line in f)
    except FileNotFoundError:
        # BUG FIX: no failure history recorded yet — the original crashed
        # here on a fresh checkout; treat it as "nothing to exclude".
        pass
    # Drop alphas already known to fail checks.
    return [alpha for alpha in alphas if alpha['id'] not in failed_ids]

def filter_failed_alphas(wqbs: wqb.WQBSession, alpha_list: list) -> list:
    """Drop alphas that have any FAIL check result.

    Each failing alpha is flagged RED on the platform and its id is appended
    to ``./results/check_fail_ids.csv`` so later runs can skip it.

    :param alpha_list: alpha record dicts, each with ``is.checks`` populated
    :return: the alphas with no FAIL check
    """
    kept = []  # renamed from ``list`` to stop shadowing the builtin
    failed_lines = []
    print('过滤掉有FAIL指标的Alpha...')
    for alpha in alpha_list:
        has_fail = any(
            check['result'] == 'FAIL' for check in alpha['is']['checks']
        )
        if has_fail:
            # Mark the failing alpha RED on the platform (once per alpha).
            wqbs.patch_properties(alpha_id=alpha['id'], color="RED", log=f'utils#filter_failed_alphas')
            failed_lines.append(f"{alpha['id']}\n")
        else:
            kept.append(alpha)

    if failed_lines:
        save_lines_to_file('./results/check_fail_ids.csv', failed_lines)

    return kept

def is_favorable(wqbs: wqb.WQBSession, alpha_id: str,  iqc_id='IQC2025S2', improve: int = 0) -> bool:
    """Return True when an alpha looks worth favoriting.

    Criterion: the before/after submission rank change reported by the IQC
    performance endpoint must exceed ``improve``.

    :param iqc_id: competition/season identifier to query against
    :param improve: minimum rank improvement required (exclusive)
    """
    rank_change = wqbs.get_performance(alpha_id=alpha_id, iqc_id=iqc_id)
    print(f"Alpha {alpha_id} 的Change(名次)为: {rank_change}")
    return rank_change > improve


def load_credentials(credentials_file: str):
    """Load a two-element JSON credentials file.

    The file is expected to contain a JSON array whose first two elements
    are the username and password.

    :param credentials_file: path, may start with ``~`` (expanded)
    :return: ``(username, password)`` tuple
    :raises: re-raises any failure after printing a diagnostic
    """
    try:
        with open(expanduser(credentials_file)) as handle:
            pair = json.load(handle)
        return pair[0], pair[1]
    except Exception as err:
        print(f"Failed to load credentials: {str(err)}")
        raise

def hash(simulation_data:dict) -> str:
    """生成稳定的哈希值"""
    parts = []
    
    # 处理常规字段
    regular_keys = sorted([k for k in simulation_data if k != 'settings'])
    for key in regular_keys:
        parts.append(f"{key}={simulation_data[key]}&")
    
    # 特殊处理settings字典
    if 'settings' in simulation_data and isinstance(simulation_data['settings'], dict):
        settings = simulation_data['settings']
        for key in sorted(settings):
            parts.append(f"{key}={settings[key]}&")
    
    # 拼接所有部分并生成哈希
    param_kv_str = ''.join(parts)
    return hashlib.md5(param_kv_str.encode('utf-8')).hexdigest()

def save_lines_to_file(dest_file: str, lines: list):
    """Append ``lines`` verbatim to ``dest_file`` (created if missing)."""
    count = len(lines)
    with open(dest_file, 'a') as out_fh:
        out_fh.writelines(lines)

    print(f"✅ {count} 行已保存")


def prune(next_alpha_recs, prefix, keep_num):
    """Keep at most ``keep_num`` records per datafield.

    ``prefix`` is the datafield prefix (fnd6, mdl175, ...). The field name is
    extracted from each record's expression (``rec[1]``); records with a
    negative sharpe (``rec[2]``) are bucketed under a '-'-prefixed field so
    they are counted separately from positive ones.

    :return: list of ``[expression, decay]`` pairs (decay is ``rec[-1]``)
    """
    kept = []
    bucket_counts = defaultdict(int)
    for rec in next_alpha_recs:
        expression, sharpe, decay = rec[1], rec[2], rec[-1]
        bucket = expression.split(prefix)[-1].split(",")[0]
        if sharpe < 0:
            bucket = "-%s" % bucket
        if bucket_counts[bucket] >= keep_num:
            continue
        bucket_counts[bucket] += 1
        kept.append([expression, decay])
    return kept

def get_dataset_fields(
    wqbs: wqb.WQBSession, 
    dataset_id: str,
    region: str = "EUR", 
    delay: int = 1, 
    universe: str = "TOP2500",
    limit=200,
    order: str = '-alphaCount'
) -> list:
    """Collect every field of a dataset by iterating the paged search results.

    :param dataset_id: identifier of the dataset to enumerate
    :param order: server-side sort key (default: most-used fields first)
    :return: flat list of field record dicts across all pages
    """
    responses = wqbs.search_fields(
        region=region,
        delay=delay,
        universe=universe,
        dataset_id=dataset_id,
        limit=limit,
        log="utils#get_dataset_fields",
        order=order,
    )
    fields = []
    for response in responses:
        fields.extend(response.json()['results'])
    return fields

def get_pnl_data(wqbs: wqb.WQBSession, alpha_id: str) -> dict:
    """Fetch an alpha's PnL record set, honouring Retry-After throttling.

    The endpoint may be rate limited; when the response carries a
    ``Retry-After`` header we sleep for that long and retry in a loop
    (the original recursed, which could exhaust the stack under sustained
    throttling).

    :return: decoded JSON payload — a dict, not a DataFrame (the original
             annotation was wrong: ``resp.json()`` is returned as-is)
    """
    while True:
        resp = wqbs.get(f"{wqb.WQB_API_URL}/alphas/{alpha_id}/recordsets/pnl")
        retry_after = float(resp.headers.get(wqb.RETRY_AFTER, 0))
        if retry_after <= 0:
            return resp.json()
        time.sleep(retry_after)

def sort_by_grade(alpha_first: dict, alpha_second: dict) -> int:
    """Comparator ordering alphas by grade (usable with functools.cmp_to_key).

    Rules, in order:
    - identical grades compare equal (0);
    - a None grade sorts ahead of any present grade;
    - a first grade of INFERIOR or AVERAGE is pushed back (+1);
    - anything else is pulled forward (-1).
    """
    first = alpha_first['grade']
    second = alpha_second['grade']

    if first == second:
        return 0
    if first is None or second is None:
        return -1 if first is None else 1
    return 1 if first.upper() in ('INFERIOR', 'AVERAGE') else -1

def _min_max_scale(values):
    """Min-max scale to [0, 1]; a constant column maps to zeros, not NaN."""
    lo = np.min(values)
    hi = np.max(values)
    span = hi - lo
    if span == 0:
        # BUG FIX: the original divided by (max - min) unguarded, so any
        # constant metric column produced NaN scores and poisoned sorting.
        return np.zeros(len(values), dtype=float)
    return (values - lo) / span


def calculate_composite_score(arr):
    """
    Compute a composite score from sharpe, turnover, fitness, margin and
    returns (columns 2, 3, 4, 5 and 10 of ``arr``).

    Each metric is min-max normalized across rows. Turnover is subtracted
    because it is negatively correlated with out-of-sample performance,
    while the other metrics are added.

    :param arr: 2-D array whose columns 2, 3, 4, 5, 10 hold
                sharpe, turnover, fitness, margin, returns
    :return: 1-D array of composite scores, one per row
    """
    sharpe, turnover, fitness, margin, returns = (
        arr[:, 2], arr[:, 3], arr[:, 4], arr[:, 5], arr[:, 10]
    )

    # turnover is negatively correlated with os; the rest positively
    return (
        _min_max_scale(sharpe)
        - _min_max_scale(turnover)
        + _min_max_scale(fitness)
        + _min_max_scale(margin)
        + _min_max_scale(returns)
    )


def sort_by_composite_score(arr):
    """
    Rank rows by composite score, highest first.

    The score of each row is appended as a trailing column before sorting,
    so callers can read it back from the result.

    :param arr: 2-D array accepted by ``calculate_composite_score``
    :return: copy of ``arr`` with a score column appended, sorted descending
    """
    scores = calculate_composite_score(arr)
    with_scores = np.column_stack((arr, scores))
    descending = np.argsort(scores)[::-1]
    return with_scores[descending]

def sort_alpha_by_composite_score(arr):
    """Trim each record to its first 11 fields, then rank by composite score."""
    trimmed = np.array([record[:11] for record in arr], dtype=object)
    return sort_by_composite_score(trimmed)

def get_prod_corr(wqbs: wqb.WQBSession, alpha_id):
    """Return the production correlation of an alpha.

    Queries the prod-correlation endpoint and returns whichever of the
    reported ``max``/``min`` has the larger absolute value (sign preserved),
    or None when the request fails, is rate limited, or has no records —
    callers such as ``retry_get_prod_corr`` treat None as "try again".
    """
    # The endpoint may be rate limited.
    resp = wqbs.get(f"{wqb.WQB_API_URL}/alphas/{alpha_id}/correlations/prod")
    remaining = int(resp.headers.get('Ratelimit-Remaining', 0))
    if remaining <= 0:
        print(f"PC接口被限流，等待30s")
        time.sleep(30)
        return None  # explicit: original fell through returning None implicitly
    if resp.status_code != 200:
        print(f"PC接口请求失败，状态码: {resp.status_code}")
        return None
    remain_times = int(resp.headers.get("RateLimit-Remaining", 0))
    reset_time = time.time() + int(resp.headers.get("RateLimit-Reset", 60))
    if remain_times <= 3 and reset_time - time.time() > 0:
        print(f"剩余Prod Correlation查询次数: {remain_times}, 重置时间: {reset_time - time.time()} 秒后.")
        # BUG FIX: the original called anyio.sleep() here — an async coroutine
        # that is never awaited in this synchronous function, so no pause
        # actually happened (and a RuntimeWarning was emitted).
        time.sleep(max(3, reset_time - time.time()))

    resp_json = resp.json()
    if "records" not in resp_json:
        return None
    columns = [dct["name"] for dct in resp_json["schema"]["properties"]]
    self_corr_df = pd.DataFrame(resp_json["records"], columns=columns)
    if self_corr_df.empty:
        return None
    print(f'{alpha_id} max: {resp_json["max"]} min: {resp_json["min"]}')
    # Pick whichever of min/max has the larger magnitude.
    if abs(resp_json["max"]) > abs(resp_json["min"]):
        return resp_json["max"]
    return resp_json["min"]
    
def retry_get_prod_corr(wqbs: wqb.WQBSession, alpha_id, max_retry=3):
    """Retry ``get_prod_corr`` up to ``max_retry`` times.

    Waits 60 seconds after every unsuccessful attempt (a None result or a
    JSON decode error). Returns the first non-None correlation, or None
    when all attempts fail.
    """
    for _attempt in range(max_retry):
        try:
            corr = get_prod_corr(wqbs, alpha_id)
            if corr is not None:
                return corr
        except json.JSONDecodeError:
            # Malformed payload — treat as transient and retry.
            pass
        time.sleep(60)  # back off before the next attempt
    return None

async def get_self_corr(wqbs: wqb.WQBSession, alpha_id):
    """Fetch the self-correlation endpoint for an alpha.

    BUG FIX: the original awaited the request and then dropped the result,
    so the coroutine always returned None; the response is now returned to
    the caller.
    """
    resp = await wqbs.retry(
        wqb.GET, f"{wqb.WQB_API_URL}/alphas/{alpha_id}/correlations/self", max_tries=200, log="get_self_corr"
    )
    return resp


def retry_request(wqbs: wqb.WQBSession, method, api_uri, max_tries=50, log=None, **kwargs):
    """Issue a request with retries, backing off on rate limits and errors.

    A rate-limited 200 response (``Ratelimit-Remaining`` exhausted) waits
    30s and retries immediately; any other failure waits 60s before the
    next attempt.

    :param api_uri: path appended to ``wqb.WQB_API_URL``
    :return: the successful Response, or None after ``max_tries`` attempts
    """
    for _attempt in range(max_tries):
        try:
            resp = wqbs.request(method, f"{wqb.WQB_API_URL}/{api_uri}", log=log, **kwargs)
            if resp.status_code == 200:
                remaining = int(resp.headers.get('Ratelimit-Remaining', 0))
                if remaining <= 0:
                    print(f"PC接口被限流，等待30s")
                    time.sleep(30)
                    continue
                return resp
            print(f"PC接口请求失败，状态码: {resp.status_code}")
        except json.JSONDecodeError:
            pass
        # BUG FIX: the original incremented an undefined ``retry_count`` here,
        # raising NameError on the first non-200 response or decode error.
        time.sleep(60)  # wait before the next retry
    return None

def save_to_file(sim_data_list: list, region: str = '', dataset_id: str = None, step: str = '', max_num: int = 30000):
    """Shuffle, truncate and dump simulation data to a dated JSON file.

    Output path: ``expressions/<YYYY-MM-DD>_<dataset_id or step>_<region>.json``
    (the ``expressions/`` directory must already exist).

    :param sim_data_list: records to persist; shuffled in place
    :param max_num: keep at most this many records after shuffling
    """
    print('📋 打乱顺序...')
    random.shuffle(sim_data_list)
    print(f'🔄 截取前{max_num}保存到文件...')
    sim_data_list = sim_data_list[:max_num]
    if dataset_id is None:
        dataset_id = step
    # BUG FIX: hoist the date formatting — the original nested double quotes
    # inside a double-quoted f-string, which is a SyntaxError before
    # Python 3.12 (PEP 701).
    date_str = datetime.now().strftime("%Y-%m-%d")
    file_path = f"expressions/{date_str}_{dataset_id}_{region}.json"
    with open(file_path, "w", encoding="utf-8") as file:
        json.dump(sim_data_list, file, ensure_ascii=False, indent=4)

    print('✅ 保存结束...')