# -*- encoding: utf-8 -*-

import json
import math
import os
import threading
import traceback
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from lib.API.bsc import BSC
from lib.cache.bloom_filter import ibloom, ibloom_add
from lib.cache.redis_cache import redis_instance
from lib.public import load_token_data, resolve_target_end, \
    chunks, usd, write_file, save_progress, init_bloom
from settings import TOKENS_CSV, START_BLOCK, FOLLOW_LATEST, BLOCK_STEP, ADDRESS_CHUNK, POLL_INTERVAL_MS, RPC_HTTP, \
    CONFIRMATIONS, USD_THRESHOLD, THREAD_NUMBER, \
    PROCESS_NUMBER


def run():
    """Entry point for the BSC Transfer-log collector.

    Loads the bloom filter and token metadata, health-checks the RPC node,
    splits the token addresses into batches and scans every batch in its own
    worker process.  On Ctrl+C an interrupt flag is published through Redis
    so worker processes and threads can exit cooperatively.

    :return: the literal string "done" (also after a fatal error, which is
             logged rather than re-raised).
    """
    try:
        # Load the bloom filter used to pre-screen from/to addresses.
        init_bloom()
        # Load token metadata and the list of token contract addresses.
        TOKENS, TOKEN_ADDRESSES = load_token_data(TOKENS_CSV)
        # Instantiate the RPC client.
        client = BSC(RPC_HTTP)
        # Abort early if the node is unreachable or unhealthy.
        if not client.health_check():
            raise ValueError('RPC节点健康检查失败')
        # Initial scan position.
        nextStart = START_BLOCK
        # Tunable window parameters.
        currentBlockStep = BLOCK_STEP
        currentAddressChunk = ADDRESS_CHUNK

        # === Shard the addresses, run getLogs per shard, write CSV ===
        addrBatches = chunks(TOKEN_ADDRESSES, currentAddressChunk)
        # Fan out one worker process per address batch.
        futures = []
        try:
            with ProcessPoolExecutor(max_workers=PROCESS_NUMBER) as p_executor:
                for batchIndex in range(len(addrBatches)):
                    future = p_executor.submit(
                        execute_process, batchIndex, addrBatches, nextStart,
                        client,
                        currentBlockStep, currentAddressChunk, TOKENS)
                    futures.append(future)
                # Wait for every batch; result() re-raises worker errors.
                for future in futures:
                    future.result()
                print(f'[进度] 扫描完成 采集结束')
        except KeyboardInterrupt:
            print('执行中断, [多进程] 正在清除未执行程序...')
            # Publish the interrupt flag so running workers stop themselves.
            redis_instance.set('Interrupt_status', 'yes')
            # Cancel tasks that have not started yet.
            for future in futures:
                future.cancel()
            p_executor.shutdown(wait=True)  # wait for in-flight tasks
            print(f'所有 [进程] 清除完毕, 采集结束')
    except Exception as e:
        print(f'[致命错误] {e}')
        print(f'[致命错误] {traceback.format_exc()}')
    return "done"


def execute_process(batchIndex, addrBatches, nextStart, client,
                    currentBlockStep, currentAddressChunk, tokens):
    """Scan one address batch over one block window inside a worker process.

    Resumes from the progress record kept in the Redis hash ``scan_progress``
    (if present), fetches Transfer logs for the batch via ``eth_getLogs`` and
    fans per-log processing out to a thread pool running ``exe_logs``.
    Progress — success or failure — is written back with ``save_progress`` so
    the next run can continue where this one stopped.

    :param batchIndex: zero-based index of the batch inside ``addrBatches``.
    :param addrBatches: list of address lists produced by ``chunks``.
    :param nextStart: first block number of the window to scan.
    :param client: RPC client instance (``lib.API.bsc.BSC``).
    :param currentBlockStep: block-window size.
    :param currentAddressChunk: address batch size (used for logging only).
    :param tokens: token metadata passed through to ``exe_logs``/``usd``.
    :return: None; the function returns early on interruption, RPC failure,
             or when the target block has already been reached.
    """
    rowsInPart = 0
    addrBatch = addrBatches[batchIndex]
    # Progress records in Redis are 1-based.
    batchCount = batchIndex + 1
    currentBatch = batchCount
    savedProgress = {}

    from settings import TARGET_END_RAW
    # Resume support: load the saved progress for this batch, if any.
    cache = redis_instance.hget('scan_progress', f'{currentBatch}')
    if cache:
        savedProgress = json.loads(cache)
        if savedProgress:
            # Restore the scan position from the previous run.
            nextStart = int(savedProgress['nextStart'])
            rowsInPart = int(savedProgress.get('rowsInPart', 0))
            TARGET_END = int(savedProgress.get('targetEnd', 0))
            if TARGET_END:
                # Shadow the settings value locally with the saved target.
                TARGET_END_RAW = TARGET_END
        # NOTE: the original strings printed a stray '$' before each value
        # (JS-template leftovers inside an f-string); fixed here.
        print(
            f'[配置] START_BLOCK={START_BLOCK}, TARGET_END={TARGET_END_RAW}, '
            f'FOLLOW_LATEST={FOLLOW_LATEST}')
        print(
            f'[配置] BLOCK_STEP={BLOCK_STEP}, ADDRESS_CHUNK={ADDRESS_CHUNK}, '
            f'POLL_INTERVAL={POLL_INTERVAL_MS}ms')
    print(
        f"[批次] {batchCount}/{len(addrBatches)} - 查询 {len(addrBatch)} 个Token地址"
    )
    if redis_instance.get('Interrupt_status') == 'yes':
        # The main program was interrupted; exit this worker immediately.
        print(f'主程序执行中断, 子进程: {os.getpid()} 退出完成。')
        return
    # Latest chain head (hex string from the node).
    head = client.get_block_number()
    if not head:
        print(f'获取块编号失败[get_block_number], result: {head}')
        return
    # Leave CONFIRMATIONS blocks of confirmation depth below the head.
    tip = int(head, 16) - CONFIRMATIONS
    # Resolve TARGET_END against the current head.
    targetEnd = resolve_target_end(head, TARGET_END_RAW)
    if targetEnd > tip:
        # Never scan beyond the confirmed tip.
        targetEnd = tip

    # Total window count (progress display only).
    if targetEnd > nextStart:
        totalWindows = math.ceil(
            (targetEnd - nextStart + 1) / currentBlockStep)
    else:
        totalWindows = 0

    # Follow-latest mode is driven by an external scheduler, not looped here.
    if nextStart > targetEnd:
        if not FOLLOW_LATEST:
            print(f"[完成] 已扫描到目标区块 {targetEnd}，扫描结束")
            return  # not following the chain head → done
        print(
            f"[追块] 等待新区块... 当前头部: {head}, 下次扫描: {nextStart}")
        return

    if savedProgress.get('status') == 'fail':
        # The previous run failed on this window: re-scan the same window.
        _windowEnd = savedProgress.get('windowEnd', 0)
        windowEnd = int(_windowEnd) if isinstance(_windowEnd, str) else _windowEnd
    else:
        # Clamp the window end so it never exceeds targetEnd.
        windowEnd = nextStart + (currentBlockStep - 1) if nextStart + (
                currentBlockStep - 1) <= targetEnd else targetEnd

    currentWindow = math.floor(
        (nextStart - START_BLOCK) / currentBlockStep) + 1
    print(
        f"[扫描] 窗口 {currentWindow}/{totalWindows} - 区块 {nextStart}-"
        f"{windowEnd} (head={head})"
    )
    print(
        f"[参数] 当前区块步长: {currentBlockStep}, 地址批次: {currentAddressChunk}"
    )

    try:
        # Transfer(address,address,uint256) event signature for the filter.
        transfer_text = "Transfer(address,address,uint256)"
        _status, logs = client.get_logs(
            addrBatch,
            hex(nextStart),
            hex(windowEnd),
            transfer_text=transfer_text
        )
        if not _status:
            # RPC rejected the request; `logs` holds the error payload.
            # Guard against a missing/None "message" (the original code
            # raised TypeError on `'x' in None`).
            message = (logs or {}).get("message") or ''
            if 'allowed block' in message:
                err = f'[失败] error info: {message}; 请减小[BLOCK_STEP] 配置'
                print(err)
                raise ValueError(err)
            return
        if logs:
            valid_logs = 0
            bloom_hits = 0
            usd_filtered = 0
            t_futures = []
            running_status = True
            try:
                # One thread per log entry, bounded by THREAD_NUMBER.
                with ThreadPoolExecutor(
                        max_workers=THREAD_NUMBER) as t_executor:
                    for log in logs:
                        t_future = t_executor.submit(
                            exe_logs, currentBatch, log, client, tokens,
                            addrBatch
                        )
                        t_futures.append(t_future)
                    # Aggregate the per-log counters.
                    for t_future in t_futures:
                        v, b, u = t_future.result()
                        valid_logs += v
                        bloom_hits += b
                        usd_filtered += u
            except KeyboardInterrupt:
                running_status = False
                print('执行中断, [多线程] 正在清除未执行程序...')
                redis_instance.set('Interrupt_status', 'yes')
                # Cancel tasks that have not started yet.
                for t_future in t_futures:
                    t_future.cancel()
                t_executor.shutdown(wait=True)  # wait for in-flight tasks
                print(f'[进程ID]: {os.getpid()} 的所有 [线程] 清除完毕')
            if logs and running_status:
                print(
                    f"[结果] 原始日志: {len(logs)}, Bloom命中: {bloom_hits}, USD过滤后: {usd_filtered}, 有效记录: {valid_logs}")
    except Exception as e:
        if 'unpack non-iterable NoneType' in f'{e}':
            # Legacy guard: raised when Ctrl+C made exe_logs return None.
            return
        print(f"[错误] 批次 {batchCount} 处理失败: {e}\n")
        # Record the failure so the next run re-scans this window.
        save_progress(nextStart, windowEnd, currentBatch, totalWindows,
                      targetEnd, rowsInPart, 'fail')
        return
    # Interrupted while processing — do not advance the scan position.
    if redis_instance.get('Interrupt_status') == 'yes':
        return

    # A worker thread flagged a failure for this batch — do not advance.
    if redis_instance.hget('currentBatch_execute_status', currentBatch):
        return

    print(f'[完成] 区块 {nextStart}-{windowEnd} 处理完毕')
    # Success: persist the next start block so follow-up runs resume here.
    nextStart = windowEnd + 1
    save_progress(nextStart, windowEnd, currentBatch, totalWindows,
                  targetEnd, rowsInPart, 'true')

    next_windowEnd = nextStart + (currentBlockStep - 1) if nextStart + (
            currentBlockStep - 1) <= targetEnd else targetEnd
    print(f'[下次扫描] 区块 {nextStart}-{next_windowEnd}')


def exe_logs(currentBatch, log, client, tokens, addrBatch):
    """Process a single Transfer log entry inside a worker thread.

    Resolves the originating transaction, derives from/to addresses, keeps
    only addresses that hit the user bloom filter, values the transfer in
    USD and persists qualifying records via ``write_file``.  On any error
    the batch is flagged in Redis so the caller does not advance its window.

    :param currentBatch: 1-based batch number (used for failure flagging).
    :param log: one raw log dict returned by ``eth_getLogs``.
    :param client: RPC client used for follow-up lookups.
    :param tokens: token metadata consumed by ``usd`` for valuation.
    :param addrBatch: address batch being scanned (error-message context).
    :return: tuple ``(valid_logs, bloom_hits, usd_filtered)`` counters.
    """
    valid_logs = 0
    bloom_hits = 0
    usd_filtered = 0
    try:
        if redis_instance.get('Interrupt_status') == 'yes':
            # Main program interrupted.  Return zero counters instead of
            # bare None so the caller's `v, b, u = future.result()` unpack
            # does not raise (the old None return is what produced the
            # 'unpack non-iterable NoneType' error handled upstream).
            print(f'主程序执行中断, 子进程ID: {os.getpid()}, '
                  f'子线程 ID: {threading.get_ident()},  退出完成。')
            return valid_logs, bloom_hits, usd_filtered
        transactionHash = log.get('transactionHash')
        hash_result = client.get_transaction_hash(transactionHash)
        if not hash_result:
            return valid_logs, bloom_hits, usd_filtered
        to_address = ''
        from_address = hash_result.get("from")
        # Renamed from `input` to avoid shadowing the builtin.
        tx_input = hash_result.get('input')
        if not tx_input or not from_address:
            return valid_logs, bloom_hits, usd_filtered
        if '0x' == tx_input:
            # Plain value transfer (no calldata) → regular address.
            to_address = hash_result.get('to')
        elif tx_input.startswith('0xa9059cbb'):
            # ERC-20 transfer(address,uint256): recipient is in calldata.
            to_address = f'0x{tx_input[34:74]}'
        elif len(tx_input) > 150:
            # Long calldata: treat as a contract interaction and skip.
            return valid_logs, bloom_hits, usd_filtered
        if not to_address:
            return valid_logs, bloom_hits, usd_filtered

        # Keep the record only when from or to hits the user bloom filter.
        if not ibloom(from_address.lower()) and not ibloom(to_address.lower()):
            bloom_hits += 1
            return valid_logs, bloom_hits, usd_filtered

        result = client.get_transaction_receipts(transactionHash)
        if not result.get('logs'):
            return valid_logs, bloom_hits, usd_filtered
        receipts_logs = result.get('logs', [])
        for receipts_log in receipts_logs:
            # Transfer amount carried in the log's data field (hex string).
            value = receipts_log.get('data')
            if len(value) < 3:
                continue
            elif len(value) > 66:
                # NOTE(review): because this is an elif chain, values longer
                # than 66 chars are truncated and the '0xffffffffffff'
                # sentinel check below never runs on them — confirm this is
                # the intended behavior.
                value = value[:66]
            elif '0xffffffffffff' in value:
                continue
            _u = usd(log['address'], value, tokens)
            if _u is None or _u < USD_THRESHOLD:
                # Below the USD threshold (or token not priced) — skip.
                continue
            usd_filtered += 1

            save_data = {
                'tx_hash': log['transactionHash'],
                'log_index': int(receipts_log['logIndex'], 16),
                'block_number': int(log['blockNumber'], 16),
                'timestamp': int(client.get_timestamp(log['blockNumber']), 16),
                'token_address': log['address'],
                'raw_amount': str(int(value, 16)),
                'usd_value': f"{_u:.6f}",
                'from': from_address,
                'to': to_address,
            }
            write_file(save_data)
            valid_logs += 1
            # Record only the first qualifying transfer per transaction;
            # remove this break to record all of them.
            break
    except Exception:
        print(
            f'错误 [exe_logs] 失败,  addrBatch: {addrBatch}, error info:'
            f' {traceback.format_exc()}')
        # Flag the batch so execute_process does not advance its window.
        redis_instance.hset('currentBatch_execute_status', currentBatch,
                            'fail')
    return valid_logs, bloom_hits, usd_filtered
