# -*- encoding: utf-8 -*-

import csv
import functools
import json
import os
import random
import re
import time
from concurrent.futures import ThreadPoolExecutor
from decimal import Decimal
import requests
from lib.cache.redis_cache import redis_instance
from lib.cache.bloom_filter import ibloom_add
from settings import MAX_RETRIES, RETRY_DELAY_BASE, START_BLOCK, \
    BLOCK_STEP, INPUT_DIR, BLOOM_FP, BLOOM_N

"""TODO
    - 通用方法
"""

# Retry decorator for timed-out requests (exponential backoff with jitter).
def requestRetry(max_retries=MAX_RETRIES, retry_delay_base=RETRY_DELAY_BASE):
    """Decorator factory: retry the wrapped call up to ``max_retries`` times.

    ``TimeoutError`` triggers an exponential backoff with random jitter;
    any other exception is logged and retried immediately. When every
    attempt fails, ``requests.Timeout`` is raised, chained to the last
    underlying error so the root cause is preserved in the traceback.

    :param max_retries: maximum number of attempts.
    :param retry_delay_base: base delay (seconds) for the backoff.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Build a human-readable label for log lines.
            # NOTE(review): assumes get_logs-style callables receive
            # (client, addresses, from_block, to_block) — confirm callers.
            label = ''
            if 'get_logs' in func.__name__:
                label = f'getLogs {args[2]}-{args[3]} ({len(args[1])} addrs)'
            elif 'get_block_number' in func.__name__:
                label = 'health check'
            last_exc = None
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except TimeoutError as e:
                    last_exc = e
                    # Exponential backoff plus jitter to avoid hammering the node.
                    _t = retry_delay_base * (2 ** attempt) + random.uniform(0, 1)
                    print(f'[重试 {attempt + 1}/{max_retries}] {label}: {e}; 等待 {_t}s')
                    time.sleep(_t)
                except Exception as e:
                    # Non-timeout errors retry immediately (no delay) — original behavior.
                    last_exc = e
                    print(f'[错误]: {e}')
                    print(f'自动重试, 当前重试次数: {attempt+1}')
            print(f'[失败] {label} 在 {max_retries} 次重试后仍然失败')
            # Chain the last underlying error for easier debugging.
            raise requests.Timeout('请求超时，已达到最大重试次数。') from last_exc
        return wrapper
    return decorator


def read_file_to_bloom(file):
    """Stream one CSV file from INPUT_DIR into the shared bloom filter.

    Each row's ``address`` column is lower-cased and added to the filter;
    rows without an address are skipped.

    :param file: file name inside INPUT_DIR; falsy values are a no-op.
    """
    if not file:
        return
    print(f"[bloom] 处理文件: {file}")
    file_path = os.path.join(INPUT_DIR, file)
    # Use a distinct handle name — the original shadowed the ``file`` param.
    with open(file_path, mode='r', encoding='utf-8') as fh:
        reader = csv.DictReader(fh, skipinitialspace=True)
        for row in reader:
            address = row.get('address')
            if address:
                ibloom_add(address.lower())
    print(f'[bloom] 处理文件 [{file_path}] 完成')


def init_bloom():
    """Populate the bloom filter from the merged CSV shards, exactly once.

    A redis flag ('init_bloom'/'status') marks completion so later startups
    skip the load. Shard files are read concurrently by a small thread pool.
    On Ctrl-C, pending shards are cancelled and the completion flag is NOT
    set, so the next run retries from scratch (the original set the flag
    even after an interrupt, marking a partial filter as complete).
    """
    status = redis_instance.hget('init_bloom', 'status')
    if status:
        return
    # Select only the expected shard files from the input directory.
    GLOB = re.compile(r"^merged_cleaned_part\d+_of_26\.csv$")
    files = [f for f in os.listdir(INPUT_DIR) if GLOB.match(f)]
    print(
        f"[bloom] 读取到的文件数量: {len(files)}, 目标数量: {BLOOM_N}, 假阳率: {BLOOM_FP}")
    if not files:
        # No shards on disk: just initialise an empty filter.
        ibloom_add(init=True)
    else:
        # 10 worker threads — the work is I/O bound (CSV reads + redis writes).
        with ThreadPoolExecutor(max_workers=10) as t_executor:
            t_futures = [t_executor.submit(read_file_to_bloom, file)
                         for file in files]
            # Catch the interrupt INSIDE the ``with`` so cancel() runs before
            # the executor's __exit__ does shutdown(wait=True); the original
            # caught it outside, making every cancel() a no-op.
            try:
                for t_future in t_futures:
                    t_future.result()
            except KeyboardInterrupt:
                print('执行中断, [多线程] 正在清除未执行程序...')
                redis_instance.set('Interrupt_status', 'yes')
                # Cancellation only affects tasks that have not started yet;
                # running ones finish during the executor shutdown.
                for t_future in t_futures:
                    t_future.cancel()
                print(f'[进程ID]: {os.getpid()} 的所有 [线程] 清除完毕')
                # Do not mark init complete after an interrupt.
                return
    # Mark initialisation as done.
    redis_instance.hset('init_bloom', 'status', 'true')


def load_token_data(file_path):
    """Load token metadata from a CSV file.

    Expected columns: ``token_address``, ``decimals``, ``usd_price``.
    Empty decimals/price fields fall back to 0.

    :param file_path: path to the CSV file.
    :return: ``(TOKENS, TOKEN_ADDRESSES)`` where TOKENS maps address ->
             ``{'decimals': int, 'price': float}`` and TOKEN_ADDRESSES keeps
             file order (duplicates included). On error the (possibly
             partial) result built so far is returned.
    """
    TOKENS = {}
    TOKEN_ADDRESSES = []
    try:
        with open(file_path, mode='r', encoding='utf-8') as file:
            reader = csv.DictReader(file, skipinitialspace=True)
            for row in reader:
                token_address = row['token_address']
                TOKENS[token_address] = {
                    'decimals': int(row['decimals'] or 0),
                    'price': float(row['usd_price'] or 0)
                }
                TOKEN_ADDRESSES.append(token_address)
        print(f'[数据] 已加载 {len(TOKEN_ADDRESSES)} 个Token信息')
    except (OSError, KeyError, ValueError) as e:
        # Narrowed from bare Exception: missing/unreadable file, missing
        # column, or a malformed numeric field — log and return what we have.
        print(f'错误 [加载Token数据]: {e}')
    return TOKENS, TOKEN_ADDRESSES


def write_file(data):
    """Queue ``data`` onto the pending-write list in redis.

    The payload is JSON-encoded and pushed to the 'wait_write_data' list;
    presumably a writer worker drains it — TODO confirm the consumer.
    """
    payload = json.dumps(data)
    redis_instance.rpush('wait_write_data', payload)


def resolve_target_end(head, targe_end_raw):
    """Resolve the requested end block against the current chain head.

    'latest' (any case) and any non-positive number resolve to ``head``;
    a positive number is used as-is.
    """
    if str(targe_end_raw).lower() == 'latest':
        return head
    requested = int(targe_end_raw)
    if requested > 0:
        return requested
    return head


def chunks(arr, n):
    """Split ``arr`` into consecutive sublists of at most ``n`` items."""
    result = []
    for start in range(0, len(arr), n):
        result.append(arr[start:start + n])
    return result


def usd(token, raw_hex, tokens):
    """Convert a raw hex token amount into its USD value.

    Looks up ``token`` in ``tokens`` (entries carry 'decimals' and 'price');
    returns ``None`` for unknown tokens, otherwise
    ``amount / 10**decimals * price`` as a float.
    """
    info = tokens.get(token)
    if not info:
        # Unknown token: no price data available.
        return None
    # Parse the hex-encoded raw amount into an integer.
    amount = int(raw_hex, 16)
    # Scale down by the token's decimals, then apply the USD unit price.
    usd_value = (Decimal(amount) / (10 ** info['decimals'])) * Decimal(
        info['price'])
    return float(usd_value)


def save_progress(next_start, window_end, current_batch, total_windows,
                  target_end, rows_in_part, status):
    """Persist one batch's scan progress into the 'scan_progress' redis hash.

    Block numbers may exceed JSON-safe integer ranges downstream, so they
    are stored as strings.
    """
    now_ms = int(time.time() * 1000)  # current timestamp in milliseconds
    progress = {
        'nextStart': str(next_start),    # BigInt stored as string
        'windowEnd': str(window_end),    # BigInt stored as string
        'currentBatch': current_batch,
        'totalWindows': total_windows,
        'timestamp': now_ms,
        'startBlock': START_BLOCK,
        'targetEnd': str(target_end),
        'rowsInPart': rows_in_part,
        'status': status,
    }
    # Write straight to redis, keyed by the batch id.
    redis_instance.hset('scan_progress', current_batch, json.dumps(progress))
    # 1-based window index relative to the configured start block.
    current_window = (next_start - START_BLOCK) // BLOCK_STEP + 1
    print(
        f"[进度保存] 区块 {next_start}, 窗口 {current_window}/{total_windows}, 批次 {current_batch}"
    )


def clean_progress(current_batch):
    """TODO
        Remove one batch's collection record.
        - After clearing, all tasks restart downloading from the genesis block.
        - Recovery: place previous results into the final-results directory.
    :param current_batch: batch id (field of the 'scan_progress' hash).
    :return:
    """
    hash_key = 'scan_progress'
    redis_instance.hdel(hash_key, current_batch)


if __name__ == '__main__':
    # Script entry point: one-off bloom filter initialisation.
    init_bloom()