"""
    通用基金数据采集脚本
"""
import sys
import json
import math
from datetime import datetime, date
from typing import List
import contextlib
import io
import time
import os
import tempfile

# Cross-process mutex implemented as an O_EXCL-created lock file in the temp dir.
LOCK_PATH = os.path.join(tempfile.gettempdir(), "jq_global_lock.lock")
def acquire_lock(poll_interval=0.2, stale_after=None):
    """Block until the global file lock is acquired.

    Relies on the atomicity of os.open(O_CREAT | O_EXCL) for mutual exclusion.

    Args:
        poll_interval: seconds to sleep between acquisition attempts.
        stale_after: if set, a lock file older than this many seconds is
            treated as leftover from a crashed process and reclaimed;
            the default (None) keeps the original wait-forever behavior.
    """
    while True:
        try:
            fd = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            return
        except FileExistsError:
            if stale_after is not None:
                try:
                    if time.time() - os.path.getmtime(LOCK_PATH) > stale_after:
                        os.remove(LOCK_PATH)
                        continue  # retry immediately after reclaiming
                except OSError:
                    # Lock vanished between the check and the stat; just retry.
                    continue
            time.sleep(poll_interval)
def release_lock():
    """Release the global file lock; a missing file means it is already free."""
    with contextlib.suppress(FileNotFoundError):
        os.remove(LOCK_PATH)
# Import the JoinQuant SDK; on failure emit a structured JSON error on stdout
# (the parent process parses stdout as JSON) and exit non-zero.
try:
    import pandas as pd
    from jqdatasdk import auth, is_auth, query, finance, get_extras, get_mtss, get_all_securities
    JQ_AVAILABLE = True
except Exception as e:
    JQ_AVAILABLE = False
    # NOTE: this message is part of the stdout protocol; it stays as-is.
    print(json.dumps({
        "status": "error",
        "message": f"缺少依赖库：{e}，请执行 pip install jqdatasdk pandas"
    }, ensure_ascii=False))
    sys.exit(1)
# Utility functions
def serialize_dataframe(df: pd.DataFrame) -> List[dict]:
    """Convert a DataFrame into a list of JSON-serializable dicts.

    - date/datetime values become "YYYY-MM-DD" strings
    - numpy scalar types become native Python types
    - NaN / NaT / inf values become None

    Args:
        df: source frame; None or an empty frame yields [].

    Returns:
        One dict per row, keyed by column name.
    """
    if df is None or df.empty:
        return []
    rows = []
    for _, row in df.iterrows():
        record = {}
        for col, val in row.items():
            # Uniformly map missing values (None, NaN, NaT) to None up front.
            # This also keeps NaT away from the strftime branch below, which
            # would otherwise raise. pd.isna can return an array for list-like
            # cells, so bool() is guarded.
            try:
                if val is None or bool(pd.isna(val)):
                    record[col] = None
                    continue
            except (TypeError, ValueError):
                pass
            if isinstance(val, float) and math.isinf(val):
                record[col] = None
                continue
            # Numpy floating scalars expose __float__; drop non-finite ones.
            if hasattr(val, "__float__") and not isinstance(val, (int, float, bool, str)):
                try:
                    numeric_val = float(val)
                    if math.isnan(numeric_val) or math.isinf(numeric_val):
                        record[col] = None
                        continue
                except Exception:
                    pass
            if isinstance(val, (pd.Timestamp, datetime, date)):
                record[col] = val.strftime("%Y-%m-%d")
            elif hasattr(val, "item"):
                # Unbox numpy scalars (int64, float64, ...) to Python types.
                try:
                    record[col] = val.item()
                except Exception:
                    record[col] = val
            else:
                record[col] = val
        rows.append(record)
    return rows
def normalize_to_main_codes(codes: List[str]) -> List[str]:
    """Strip exchange suffixes (000001.OF / 159915.XSHE / 501300.XSHG) down
    to the bare main code, dropping empty entries and deduplicating while
    preserving first-seen order."""
    if not codes:
        return []
    ordered = {}
    for raw in codes:
        if not raw:
            continue
        main = raw.split('.')[0] if isinstance(raw, str) else str(raw)
        ordered.setdefault(main, None)
    return list(ordered)
# JoinQuant authentication
def authenticate(username: str, password: str) -> bool:
    """Authenticate against JoinQuant; return True when a session is active.

    auth() prints a banner on success, which would corrupt this script's
    JSON-only stdout protocol, so its output is redirected into a buffer.

    Returns:
        True if already authenticated or authentication succeeds; False on
        bad credentials or any SDK/network failure.
    """
    try:
        if is_auth():
            return True
        # Suppress the SDK's stdout chatter to keep stdout JSON-clean.
        with contextlib.redirect_stdout(io.StringIO()):
            auth(username, password)
        return bool(is_auth())
    except Exception:
        # Any SDK failure (network, bad credentials) means "not authenticated".
        return False
# Data-collection handlers (one per supported data_type)
def fetch_fund_main_info(main_codes, start_date=None, end_date=None):
    """Fetch fund master records (FUND_MAIN_INFO) for the given main codes.

    start_date/end_date are accepted for signature symmetry with the other
    handlers but are not applied to this table.

    Raises:
        ValueError: when main_codes is empty.
    """
    if not main_codes:
        raise ValueError("main_codes 参数不能为空，必须提供基金主体代码列表")
    code_filter = finance.FUND_MAIN_INFO.main_code.in_(main_codes)
    result_df = finance.run_offset_query(query(finance.FUND_MAIN_INFO).filter(code_filter))
    return serialize_dataframe(result_df)
def fetch_fund_dividend(codes, start_date, end_date):
    """Fetch dividend records (FUND_DIVIDEND), optionally bounded by pub_date.

    Best-effort: returns [] for empty input or on any query failure.
    """
    if not codes:
        return []
    main_codes = normalize_to_main_codes(codes)
    if not main_codes:
        return []
    try:
        table = finance.FUND_DIVIDEND
        stmt = query(table).filter(table.code.in_(main_codes))
        if start_date:
            stmt = stmt.filter(table.pub_date >= start_date)
        if end_date:
            stmt = stmt.filter(table.pub_date <= end_date)
        return serialize_dataframe(finance.run_offset_query(stmt))
    except Exception:
        return []

def fetch_fin_indicator(codes, start_date, end_date):
    """Fetch financial indicators (FUND_FIN_INDICATOR), bounded by pub_date.

    Best-effort: returns [] for empty input or on any query failure.
    """
    if not codes:
        return []
    main_codes = normalize_to_main_codes(codes)
    if not main_codes:
        return []
    try:
        table = finance.FUND_FIN_INDICATOR
        stmt = query(table).filter(table.code.in_(main_codes))
        if start_date:
            stmt = stmt.filter(table.pub_date >= start_date)
        if end_date:
            stmt = stmt.filter(table.pub_date <= end_date)
        return serialize_dataframe(finance.run_offset_query(stmt))
    except Exception:
        return []

def fetch_mf_daily_profit(codes, start_date, end_date):
    """Fetch money-fund daily profit records (FUND_MF_DAILY_PROFIT).

    Note: the date-range parameters filter on the table's end_date column.
    Best-effort: returns [] for empty input or on any query failure.
    """
    if not codes:
        return []
    main_codes = normalize_to_main_codes(codes)
    if not main_codes:
        return []
    try:
        table = finance.FUND_MF_DAILY_PROFIT
        stmt = query(table).filter(table.code.in_(main_codes))
        if start_date:
            stmt = stmt.filter(table.end_date >= start_date)
        if end_date:
            stmt = stmt.filter(table.end_date <= end_date)
        return serialize_dataframe(finance.run_offset_query(stmt))
    except Exception:
        return []

def fetch_net_value(codes, start_date, end_date):
    """Fetch daily net-asset-value records (FUND_NET_VALUE) by day range.

    Unlike the other table handlers, query errors propagate to the caller
    (main() converts them into an error JSON response).
    """
    if not codes:
        return []
    main_codes = normalize_to_main_codes(codes)
    if not main_codes:
        return []
    table = finance.FUND_NET_VALUE
    stmt = query(table).filter(table.code.in_(main_codes))
    if start_date:
        stmt = stmt.filter(table.day >= start_date)
    if end_date:
        stmt = stmt.filter(table.day <= end_date)
    return serialize_dataframe(finance.run_offset_query(stmt))

def fetch_mtss(codes, start_date, end_date, fields, count):
    """Fetch margin-trading / short-selling data via the SDK's get_mtss."""
    mtss_df = get_mtss(codes, start_date=start_date, end_date=end_date,
                       fields=fields, count=count)
    return serialize_dataframe(mtss_df)

def resolve_fund(fund_types=None, limit=150):
    """Resolve candidate fund codes grouped by market suffix.

    Args:
        fund_types: subset of {"XSHG", "XSHE", "OF"}; falsy means all three.
        limit: maximum codes taken per bucket (None means no cap).

    Returns:
        (jq_codes, main_codes): deduplicated full JoinQuant codes in bucket
        order, and the sorted, deduplicated main codes derived from them.
    """
    buckets = {"XSHG": [], "XSHE": [], "OF": []}
    # Exchange-traded funds are keyed by their market suffix; other suffixes
    # are ignored, matching the bucket keys above.
    for full_code in get_all_securities(types=['fund'], date=None).index.tolist():
        market = full_code.split('.')[-1]
        if market in ("XSHG", "XSHE"):
            buckets[market].append(full_code)
    # All open-end funds go into the OF bucket.
    buckets["OF"].extend(get_all_securities(types=['open_fund'], date=None).index.tolist())
    selected_types = fund_types if fund_types else ["XSHG", "XSHE", "OF"]
    jq_codes = []
    for fund_type in selected_types:
        if fund_type in buckets:
            jq_codes.extend(buckets[fund_type][:limit])
    jq_codes = list(dict.fromkeys(jq_codes))
    main_codes = sorted({full.split('.')[0] for full in jq_codes})
    return jq_codes, main_codes

def fetch_all_fund_list(fund_types=None, limit=150):
    """List up to `limit` live funds (FUND_MAIN_INFO rows with no end_date).

    Resolves the full code universe for the requested fund types, then
    queries FUND_MAIN_INFO in batches until `limit` live records are
    accumulated. Each record is augmented with its full JoinQuant code
    ("code") and the market suffix ("fund_type").
    """
    # limit=None here on purpose: the cap applies to live records found
    # below, not to the candidate universe.
    jq_codes, all_main_codes = resolve_fund(
        fund_types=fund_types,
        limit=None
    )
    if not all_main_codes:
        return []
    main_to_full = {c.split('.')[0]: c for c in jq_codes}
    batch_size = 500
    alive_records = []
    alive_count = 0  # running total; avoids re-summing all batches each loop
    for i in range(0, len(all_main_codes), batch_size):
        batch = all_main_codes[i:i + batch_size]
        q = query(finance.FUND_MAIN_INFO).filter(
            finance.FUND_MAIN_INFO.main_code.in_(batch),
            finance.FUND_MAIN_INFO.end_date.is_(None)  # still-operating funds only
        )
        df_batch = finance.run_offset_query(q)
        if df_batch is None or df_batch.empty:
            continue
        alive_records.append(df_batch)
        alive_count += len(df_batch)
        if alive_count >= limit:
            break
    if not alive_records:
        return []
    df_final = pd.concat(alive_records, ignore_index=True).head(limit)
    # Map each 6-digit main code back to its full code with market suffix;
    # codes missing from the resolved universe map to None.
    df_final["code"] = df_final["main_code"].apply(main_to_full.get)
    df_final["fund_type"] = df_final["code"].apply(
        lambda c: c.split('.')[-1] if isinstance(c, str) and '.' in c else None
    )
    return serialize_dataframe(df_final)

# Dispatch table: request "data_type" string -> handler function.
# All handlers except ALL_FUND_LIST and FUND_MTSS share the
# (codes, start_date, end_date) calling convention used in main().
HANDLER_MAP = {
    "ALL_FUND_LIST": fetch_all_fund_list,
    "FUND_MAIN_INFO": fetch_fund_main_info,
    "FUND_DIVIDEND": fetch_fund_dividend,
    "FUND_FIN_INDICATOR": fetch_fin_indicator,
    "FUND_MF_DAILY_PROFIT": fetch_mf_daily_profit,
    "FUND_NET_VALUE": fetch_net_value,
    "FUND_MTSS": fetch_mtss,
}

def main():
    """Entry point: read a JSON job from stdin, execute it, emit JSON on stdout.

    stdin carries {"username", "password", "params": {...}}; stdout carries
    exactly one JSON object (success or error). A global file lock serializes
    concurrent runs against the shared JoinQuant session.
    """
    acquire_lock()
    try:
        raw = sys.stdin.read()
        config = json.loads(raw)
        username = config.get("username")
        password = config.get("password")
        params = config.get("params", {})
        if not authenticate(username, password):
            raise Exception("聚宽账号认证失败，请检查用户名或密码")
        data_type = params.get("data_type")
        if data_type not in HANDLER_MAP:
            raise Exception(f"不支持的数据类型：{data_type}")
        # Accept several aliases for the code lists to stay tolerant of callers.
        codes = params.get("fund_codes") or params.get("codes") or params.get("fundCodes") or []
        main_codes = params.get("main_codes") or params.get("mainCodes") or []
        start_date = params.get("start_date")
        end_date = params.get("end_date")
        count = params.get("count")
        info_type = params.get("info_type")  # NOTE(review): read but never used below
        fields = params.get("fields")
        fund_types = params.get("fund_types")
        limit = params.get("limit", 150)
        handler = HANDLER_MAP[data_type]
        # When no explicit codes were supplied, derive them from fund_types.
        auto_jq_codes = []
        auto_main_codes = []
        if (not codes and not main_codes) and fund_types:
            auto_jq_codes, auto_main_codes = resolve_fund(
                fund_types=fund_types,
                limit=limit
            )
        if data_type == "ALL_FUND_LIST":
            result = handler(fund_types=fund_types, limit=limit)
        elif data_type == "FUND_MAIN_INFO":
            # FUND_MAIN_INFO keys on 6-digit main codes; derive them if absent.
            if not main_codes:
                if auto_main_codes:
                    main_codes = auto_main_codes
                elif codes:
                    main_codes = [c.split('.')[0] for c in codes]
            if not main_codes:
                raise Exception("FUND_MAIN_INFO 需要 main_codes / fund_codes 或 fund_types")
            result = handler(main_codes, None, None)
    # FUND_MTSS: needs full exchange codes plus the fields/count pass-through.
        elif data_type == "FUND_MTSS":
            if not codes:
                if auto_jq_codes:
                    codes = auto_jq_codes
                else:
                    raise Exception("FUND_MTSS 必须提供 fund_codes 或 fund_types")
            result = handler(codes, start_date, end_date, fields, count)
        # Remaining tables: FUND_DIVIDEND / FUND_FIN_INDICATOR / FUND_MF_DAILY_PROFIT / FUND_NET_VALUE
        else:
            if not codes:
                if auto_jq_codes:
                    codes = auto_jq_codes
                else:
                    raise Exception(f"{data_type} 必须提供 fund_codes 或 fund_types")
            result = handler(codes, start_date, end_date)
        # The only stdout write on the success path (protocol: one JSON object).
        print(json.dumps({
            "status": "success",
            "dataType": data_type,
            "total": len(result),
            "data": result,
            "timestamp": datetime.now().isoformat()
        }, ensure_ascii=True))
    except Exception as e:
        # Report any failure as a structured error object instead of a traceback.
        print(json.dumps({
            "status": "error",
            "message": str(e),
            "timestamp": datetime.now().isoformat()
        }, ensure_ascii=True))
    finally:
        release_lock()
if __name__ == "__main__":
    main()
