#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
基金净值明细数据下载器

本模块实现了基金净值明细数据的完整下载和管理功能，支持从东方财富网下载基金净值数据并存储到MySQL数据库。

核心功能模块：
1. 数据下载功能
   - 支持串行和并行两种下载模式
   - 从东方财富网API获取基金净值明细数据
   - 支持分页下载和数据批量处理
   - 实现单个基金和批量基金下载

2. 反爬虫机制
   - User-Agent池随机轮换
   - 请求频率控制和随机延迟
   - 会话管理和连接池优化
   - 异常检测和自动重试

3. 状态管理系统
   - 基金下载状态跟踪（pending/downloading/completed/failed）
   - 下载进度实时监控
   - 错误信息记录和重试计数
   - 断点续传支持

4. 数据存储管理
   - 基金净值数据存储到fund_nav_detail_new表
   - 下载状态管理通过fund_scraper_status表
   - 支持数据清理和重新下载
   - 批量插入优化和重复数据处理

5. 统计和监控
   - 实时下载统计（总数、已完成、下载中、待处理、失败）
   - 基金状态列表查询和分页
   - 下载进度百分比计算
   - 净值记录数统计

6. 多线程处理
   - 支持可配置的并行线程数
   - 线程安全的状态更新
   - 资源管理和线程池优化
   - 异常处理和线程清理

数据流程：
- 基金代码来源：fund_info_detail表
- 净值数据存储：fund_nav_detail_new表  
- 状态管理：fund_scraper_status表
- API数据源：东方财富网基金净值接口

支持的操作模式：
- 串行下载：逐个处理基金，适合稳定性要求高的场景
- 并行下载：多线程并发处理，适合大批量数据下载
- 单基金下载：支持指定基金代码的独立下载
- 断点续传：支持从中断点继续下载

Author: zhaozeliang  
Date: 2025-10-11
Version: 2.0
"""

import requests
import pymysql
import time
import re
import json
import random
from datetime import datetime, date
from decimal import Decimal
import logging
from typing import List, Dict, Optional, Tuple, Any
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock, Semaphore
from dataclasses import dataclass
from bs4 import BeautifulSoup
import queue

# Module-level logger — relies on the host application's (Django) logging configuration.
logger = logging.getLogger(__name__)

@dataclass
class PerformanceStats:
    """Running counters for scraper throughput and request latency."""
    total_requests: int = 0           # all HTTP requests issued
    successful_requests: int = 0      # requests that completed OK
    failed_requests: int = 0          # requests that raised/errored
    total_records_processed: int = 0  # NAV rows handled so far
    total_time: float = 0.0           # cumulative wall time (reserved)
    avg_request_time: float = 0.0     # running mean request duration, seconds
    db_operations: int = 0            # database round-trips performed
    cache_hits: int = 0               # cache hit counter (reserved)

    def add_request(self, success: bool, duration: float):
        """Record one request outcome and fold its duration into the mean."""
        self.total_requests += 1
        if success:
            self.successful_requests += 1
        else:
            self.failed_requests += 1
        # Incremental mean: new_avg = (old_avg * (n - 1) + d) / n.
        # For n == 1 this reduces to `duration` since the initial mean is 0.
        n = self.total_requests
        self.avg_request_time = (self.avg_request_time * (n - 1) + duration) / n

    def add_page_processed(self):
        """Hook for per-page accounting; intentionally a no-op for now."""
        pass

    def add_db_operation(self):
        """Count one database operation."""
        self.db_operations += 1

    @property
    def records_processed(self) -> int:
        """Alias for total_records_processed."""
        return self.total_records_processed

    @records_processed.setter
    def records_processed(self, value: int):
        """Set the processed-record counter through the alias."""
        self.total_records_processed = value

class RateLimiter:
    """Adaptive request throttle.

    Enforces a minimum interval between requests and widens that interval
    after consecutive failures, shrinking it back again on success.
    """

    def __init__(self, max_requests_per_second: float = 3.0):
        self.max_requests_per_second = max_requests_per_second
        self.min_interval = 1.0 / max_requests_per_second  # base gap between requests
        self.last_request_time = 0.0   # timestamp of the most recent acquire()
        self.consecutive_errors = 0    # failures since the last success
        self.adaptive_delay = 0.0      # extra back-off stacked on the base gap
        self.lock = Lock()

    def acquire(self):
        """Block until the caller is allowed to fire the next request."""
        with self.lock:
            now = time.time()
            # Remaining portion of the base interval, if any, plus the
            # error-driven back-off.
            wait = max(0, self.min_interval - (now - self.last_request_time))
            wait += self.adaptive_delay
            if wait > 0:
                time.sleep(wait)
            self.last_request_time = time.time()

    def on_success(self):
        """Reset the error streak and decay the back-off by 10%."""
        with self.lock:
            self.consecutive_errors = 0
            self.adaptive_delay = max(0, self.adaptive_delay * 0.9)

    def on_error(self):
        """Grow the back-off in proportion to the error streak, capped at 5s."""
        with self.lock:
            self.consecutive_errors += 1
            self.adaptive_delay = min(5.0, self.adaptive_delay + 0.5 * self.consecutive_errors)

class DatabaseConnectionPool:
    """Fixed-size pool of pymysql connections shared across worker threads.

    Connections are parked in a bounded queue; callers borrow via
    get_connection() and must give them back via return_connection().
    """

    def __init__(self, db_config: Dict, pool_size: int = 10):
        """Create the pool and eagerly open `pool_size` connections.

        Args:
            db_config: kwargs passed straight to pymysql.connect().
            pool_size: number of connections kept in the pool.

        Raises:
            Exception: propagated if any initial connection fails.
        """
        self.db_config = db_config
        self.pool_size = pool_size
        self.pool = queue.Queue(maxsize=pool_size)
        self.lock = Lock()
        self._initialize_pool()

    def _initialize_pool(self):
        """Open the initial set of connections; abort on the first failure."""
        for _ in range(self.pool_size):
            try:
                self.pool.put(pymysql.connect(**self.db_config))
            except Exception as e:
                logger.error(f"初始化数据库连接失败: {e}")
                raise

    def get_connection(self):
        """Borrow a connection; replaces dead ones and overflows when empty.

        Returns:
            An open pymysql connection (pooled or freshly created).
        """
        try:
            conn = self.pool.get(timeout=5)
            if not conn.open:
                # BUGFIX: the stale handle used to be silently dropped; close
                # it explicitly so its socket is released before reconnecting.
                try:
                    conn.close()
                except Exception:
                    pass  # best-effort: a dead handle may refuse to close
                conn = pymysql.connect(**self.db_config)
            return conn
        except queue.Empty:
            # Pool exhausted: hand out an overflow connection; it will be
            # re-absorbed by return_connection() if a slot frees up.
            return pymysql.connect(**self.db_config)
        except Exception as e:
            logger.error(f"获取数据库连接失败: {e}")
            raise

    def return_connection(self, conn):
        """Give a borrowed connection back; surplus connections are closed."""
        try:
            if conn and conn.open:
                self.pool.put_nowait(conn)
        except queue.Full:
            # Pool already at capacity — this was an overflow connection.
            conn.close()
        except Exception as e:
            logger.error(f"归还数据库连接失败: {e}")

    def close_all(self):
        """Drain the pool and close every connection still parked in it."""
        while not self.pool.empty():
            try:
                self.pool.get_nowait().close()
            except queue.Empty:
                break

class FundNavDetailScraper:
    """基金净值明细数据下载器"""
    
    def __init__(self, enable_parallel: bool = False, max_workers: int = 10, 
                 enable_high_performance: bool = False, batch_size: int = 1500):
        """Initialize the downloader.

        Args:
            enable_parallel: enable the multi-threaded download path.
            max_workers: maximum worker threads, default 10 (capped at 10;
                only effective when enable_parallel=True).
            enable_high_performance: build the high-performance components
                (performance stats, rate limiter, DB pool, session queue).
            batch_size: bulk-processing batch size (only effective when
                enable_high_performance=True).
        """
        # SECURITY NOTE(review): database credentials are hard-coded here;
        # they should be loaded from environment/configuration, not source.
        self.db_config = {
            'host': 'localhost',
            'user': 'root',
            'password': 'czya31505',
            'database': 'fund',
            'charset': 'utf8mb4',
            'autocommit': True
        }
        
        # Anti-scraping measure: pool of User-Agent strings to rotate through.
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        ]
        
        self.session = requests.Session()
        self._update_session_headers()
        self.is_running = False
        
        # Parallel-download attributes.
        self.enable_parallel = enable_parallel
        self.max_workers = min(max_workers, 10)  # hard cap on concurrency
        self.session_pool = []
        self.data_lock = Lock()
        
        # High-performance mode configuration.
        self.enable_high_performance = enable_high_performance
        self.batch_size = batch_size
        
        # High-performance mode components (only built when enabled).
        if self.enable_high_performance:
            self.performance_stats = PerformanceStats()
            self.rate_limiter = RateLimiter(max_requests_per_second=5.0)
            self.db_pool = DatabaseConnectionPool(self.db_config, pool_size=15)
            self.session_queue = queue.Queue()
            self._init_high_performance_sessions()
        else:
            self.performance_stats = None
            self.rate_limiter = None
            self.db_pool = None
        
        # Anti-scraping measure: request-rate control.
        self.request_semaphore = Semaphore(self.max_workers)  # permits match worker count
        self.last_request_time = {}  # last request timestamp per thread
        
        # Build the per-thread session pool when parallel mode is on.
        if self.enable_parallel:
            self._init_session_pool()
    
    def _init_high_performance_sessions(self):
        """Fill the session queue with one pre-configured session per worker."""
        sessions = [requests.Session() for _ in range(self.max_workers)]
        for sess in sessions:
            sess.headers.update(self._get_random_headers())
            self.session_queue.put(sess)
    
    def _get_random_headers(self) -> Dict[str, str]:
        """获取随机请求头"""
        return {
            'User-Agent': random.choice(self.user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
    
    def _get_session(self):
        """获取会话对象"""
        if self.enable_high_performance:
            try:
                return self.session_queue.get(timeout=1)
            except queue.Empty:
                # 如果队列为空，创建新会话
                session = requests.Session()
                session.headers.update(self._get_random_headers())
                return session
        else:
            return self.session
    
    def _return_session(self, session):
        """归还会话对象"""
        if self.enable_high_performance:
            try:
                self.session_queue.put_nowait(session)
            except queue.Full:
                # 队列已满，不归还
                pass
    
    def _update_session_headers(self):
        """更新会话头部信息，随机选择User-Agent"""
        user_agent = random.choice(self.user_agents)
        self.session.headers.update({
            'User-Agent': user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        })
    
    def _init_session_pool(self):
        """Create one pre-configured HTTP session per worker thread."""
        fixed_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        for _ in range(self.max_workers):
            worker_session = requests.Session()
            worker_session.headers.update(fixed_headers)
            self.session_pool.append(worker_session)
        
    def get_database_connection(self) -> pymysql.Connection:
        """Open a brand-new database connection from self.db_config.

        Raises:
            Exception: re-raised after logging when the connection fails.
        """
        try:
            return pymysql.connect(**self.db_config)
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            raise
    
    def get_fund_codes(self) -> List[str]:
        """Fetch all distinct fund codes and pre-seed their status records.

        Returns:
            Sorted list of fund codes, or [] when the query fails.
        """
        connection = self.get_database_connection()
        cursor = None
        try:
            cursor = connection.cursor()
            cursor.execute("SELECT DISTINCT fund_code FROM fund_info_detail ORDER BY fund_code")
            fund_codes = [row[0] for row in cursor.fetchall()]
            logger.info(f"获取到{len(fund_codes)}个基金代码")
            # Seed status rows up front so the frontend can show totals immediately.
            if fund_codes:
                self._initialize_fund_status_records(fund_codes, cursor, connection)
            return fund_codes
        except Exception as e:
            logger.error(f"获取基金代码失败: {e}")
            return []
        finally:
            if cursor:
                cursor.close()
            connection.close()
    
    def _initialize_fund_status_records(self, fund_codes: List[str], cursor, connection):
        """Bulk-insert 'pending' status rows for the given fund codes.

        Uses INSERT IGNORE so codes that already have a status row are left
        untouched; rolls back on any failure.
        """
        try:
            # Single executemany keeps this a one-round-trip batch insert.
            insert_sql = """
            INSERT IGNORE INTO fund_scraper_status 
            (fund_code, data_status, total_records, retry_count, last_update_date, updated_at)
            VALUES (%s, 'pending', 0, 0, CURDATE(), CURRENT_TIMESTAMP)
            """
            cursor.executemany(insert_sql, [(code,) for code in fund_codes])
            affected_rows = cursor.rowcount
            connection.commit()
            logger.info(f"预先初始化了{affected_rows}个基金的状态记录")
        except Exception as e:
            logger.error(f"初始化基金状态记录失败: {e}")
            connection.rollback()
    
    def _is_valid_nav_value(self, value: str) -> bool:
        """验证净值数据是否为有效的数值格式"""
        if not value or value.strip() == '':
            return False
        
        # 移除可能的空格和特殊字符
        cleaned_value = value.strip()
        
        # 检查是否包含非数值字符（除了小数点和负号）
        if any(char.isalpha() or char in ['%', '元', '万', '亿'] for char in cleaned_value):
            return False
        
        try:
            float(cleaned_value)
            return True
        except ValueError:
            return False
    
    def _is_valid_date(self, date_str: str) -> bool:
        """验证日期格式是否正确"""
        if not date_str or date_str.strip() == '':
            return False
        
        try:
            datetime.strptime(date_str.strip(), '%Y-%m-%d')
            return True
        except ValueError:
            return False

    def get_fund_nav_data(self, fund_code: str, page: int = 1, per_page: int = 20) -> Optional[Tuple[List[Dict], Dict]]:
        """获取基金净值数据
        
        Returns:
            Tuple[List[Dict], Dict]: (净值记录列表, 分页信息字典)
            分页信息包含: {'total_records': int, 'total_pages': int, 'current_page': int}
        """
        if self.enable_high_performance:
            return self._get_fund_nav_data_high_performance(fund_code, page, per_page)
        else:
            return self._get_fund_nav_data_standard(fund_code, page, per_page)
    
    def _get_fund_nav_data_high_performance(self, fund_code: str, page: int = 1, per_page: int = 20) -> Optional[Tuple[List[Dict], Dict]]:
        """High-performance fetch of one page of NAV history.

        Uses a pooled HTTP session, the adaptive rate limiter, and records
        request timing into performance_stats.

        Returns:
            (records, pagination_info). NOTE(review): on failure this returns
            ([], info) whereas the standard variant returns (None, info) —
            callers that distinguish None from [] should confirm which path
            they call.
        """
        start_time = time.time()
        session = self._get_session()
        
        try:
            # Adaptive throttling before each request.
            if self.rate_limiter:
                self.rate_limiter.acquire()
            
            # Build the API URL.
            url = "http://fund.eastmoney.com/f10/F10DataApi.aspx"
            params = {
                'type': 'lsjz',
                'code': fund_code,
                'page': page,
                'per': per_page,
                'sdate': '',
                'edate': '',
                'rt': str(int(time.time() * 1000))  # cache-busting timestamp
            }
            
            response = session.get(url, params=params, timeout=30)
            response.raise_for_status()
            
            # Record the successful request.
            if self.rate_limiter:
                self.rate_limiter.on_success()
            if self.performance_stats:
                self.performance_stats.add_request(True, time.time() - start_time)
            
            content = response.text
            
            # Parse pagination metadata.
            pagination_info = self._parse_pagination_info(content, page)
            
            # Parse the embedded HTML table into raw record dicts.
            nav_records = self._parse_html_content(content)
            
            return nav_records, pagination_info
            
        except Exception as e:
            # Record the failed request; the limiter widens its back-off.
            if self.rate_limiter:
                self.rate_limiter.on_error()
            if self.performance_stats:
                self.performance_stats.add_request(False, time.time() - start_time)
            
            logger.error(f"高性能模式获取基金{fund_code}净值数据失败: {e}")
            return [], {'total_records': 0, 'total_pages': 0, 'current_page': page}
        finally:
            self._return_session(session)
    
    def _parse_pagination_info(self, content: str, page: int) -> Dict:
        """解析分页信息"""
        pagination_info = {'total_records': 0, 'total_pages': 0, 'current_page': page}
        if 'records:' in content:
            try:
                # 提取总记录数
                records_start = content.find('records:') + 8
                records_end = content.find(',', records_start)
                total_records = int(content[records_start:records_end])
                
                # 提取总页数
                pages_start = content.find('pages:') + 6
                pages_end = content.find(',', pages_start)
                total_pages = int(content[pages_start:pages_end])
                
                # 提取当前页数
                curpage_start = content.find('curpage:') + 8
                curpage_end = content.find('}', curpage_start)
                current_page = int(content[curpage_start:curpage_end])
                
                pagination_info = {
                    'total_records': total_records,
                    'total_pages': total_pages,
                    'current_page': current_page
                }
                
            except (ValueError, IndexError) as e:
                logger.warning(f"解析分页信息失败: {e}")
        
        return pagination_info
    
    def _parse_html_content(self, content: str) -> List[Dict]:
        """Extract NAV rows from the API's JS-wrapped HTML payload.

        The endpoint returns `var apidata={ content:"<table>…",records:…}`;
        this slices the HTML fragment out, parses the table with
        BeautifulSoup, cleans each row, and drops rows that fail
        date/number validation.

        Returns:
            List of raw record dicts keyed FSRQ/DWJZ/LJJZ/JZZZL/SGZT/SHZT/FHSP;
            empty list when no table is present.
        """
        nav_records = []
        
        # Slice the HTML fragment out of the JS assignment.
        if 'apidata=' in content:
            start = content.find('content:"') + 9
            end = content.find('",records:')
            html_content = content[start:end]
            
            # Strip escape backslashes from the embedded HTML.
            html_content = html_content.replace('\\', '')
            
            # Parse the table with BeautifulSoup.
            soup = BeautifulSoup(html_content, 'html.parser')
            table = soup.find('table')
            
            if table:
                tbody = table.find('tbody')
                rows = []
                if tbody:
                    rows = tbody.find_all('tr')
                
                for row in rows:
                    cells = row.find_all('td')
                    if len(cells) >= 6:
                        # Raw cell text in table order: date, unit NAV,
                        # cumulative NAV, daily growth, subscription status,
                        # redemption status, optional dividend column.
                        nav_date = cells[0].text.strip()
                        unit_nav = cells[1].text.strip()
                        cumulative_nav = cells[2].text.strip()
                        daily_growth = cells[3].text.strip()
                        subscription_status = cells[4].text.strip()
                        redemption_status = cells[5].text.strip()
                        dividend_info = cells[6].text.strip() if len(cells) > 6 else ''
                        
                        # Cleaning step.
                        nav_date = nav_date.rstrip('*').strip()  # drop trailing asterisk markers
                        unit_nav = unit_nav.replace('%', '').strip()  # drop percent sign
                        cumulative_nav = cumulative_nav.replace('%', '').strip()  # drop percent sign
                        daily_growth = daily_growth.replace('%', '').strip()  # drop percent sign
                        
                        # Fallback: reuse the unit NAV when cumulative NAV is blank.
                        if not cumulative_nav and unit_nav:
                            logger.warning(f"累计净值为空，使用单位净值作为累计净值: {unit_nav}")
                            cumulative_nav = unit_nav
                        
                        # Validation (performed after cleaning).
                        # Skip rows whose date does not parse as YYYY-MM-DD.
                        if not self._is_valid_date(nav_date):
                            logger.warning(f"日期格式异常，跳过该行: {nav_date}")
                            continue
                        
                        # Skip rows whose NAV values are not plain numbers.
                        if not self._is_valid_nav_value(unit_nav):
                            logger.warning(f"单位净值格式异常，跳过该行: 单位净值={unit_nav}")
                            continue
                        
                        if not self._is_valid_nav_value(cumulative_nav):
                            logger.warning(f"累计净值格式异常，跳过该行: 累计净值={cumulative_nav}")
                            continue
                        
                        record = {
                            'FSRQ': nav_date,
                            'DWJZ': unit_nav,
                            'LJJZ': cumulative_nav,
                            'JZZZL': daily_growth,
                            'SGZT': subscription_status,
                            'SHZT': redemption_status,
                            'FHSP': dividend_info
                        }
                        nav_records.append(record)
        
        return nav_records
    
    def _get_fund_nav_data_standard(self, fund_code: str, page: int = 1, per_page: int = 20) -> Optional[Tuple[List[Dict], Dict]]:
        """Standard (shared-session) fetch of one page of NAV history.

        Args:
            fund_code: fund code to query.
            page: 1-based page number.
            per_page: rows per page.

        Returns:
            (records, pagination_info) where records is a list of raw dicts
            (keys FSRQ/DWJZ/LJJZ/JZZZL/SGZT/SHZT/FHSP), or None when the
            response lacks the expected apidata/table; pagination_info always
            has 'total_records', 'total_pages', 'current_page'.
        """
        try:
            # HTML endpoint that wraps a data table in a JS variable assignment.
            url = "http://fund.eastmoney.com/f10/F10DataApi.aspx"
            params = {
                'type': 'lsjz',
                'code': fund_code,
                'page': page,
                'per': per_page,
                'sdate': '',
                'edate': '',
                'rt': str(int(time.time() * 1000))  # cache-busting timestamp
            }
            
            response = self.session.get(url, params=params, timeout=30)
            response.raise_for_status()
            
            content = response.text
            
            # Reuse the shared pagination parser instead of duplicating its
            # string-scanning logic inline (was a verbatim copy).
            pagination_info = self._parse_pagination_info(content, page)
            
            if 'apidata=' not in content:
                logger.error(f"未找到基金{fund_code}的apidata")
                return None, pagination_info
            
            # Slice the embedded HTML fragment out of the JS assignment and
            # strip escape backslashes.
            start = content.find('content:"') + 9
            end = content.find('",records:')
            html_content = content[start:end].replace('\\', '')
            
            # Module-level BeautifulSoup import is used; the old function-local
            # re-import was redundant.
            soup = BeautifulSoup(html_content, 'html.parser')
            table = soup.find('table')
            if not table:
                logger.error(f"未找到基金{fund_code}的数据表格")
                return None, pagination_info
            
            tbody = table.find('tbody')
            rows = tbody.find_all('tr') if tbody else []
            
            nav_records = []
            for row in rows:
                cells = row.find_all('td')
                if len(cells) < 6:
                    continue
                # Raw cell text in table order: date, unit NAV, cumulative NAV,
                # daily growth, subscription status, redemption status,
                # optional dividend column. Clean trailing asterisks and
                # percent signs as we extract.
                nav_date = cells[0].text.strip().rstrip('*').strip()
                unit_nav = cells[1].text.strip().replace('%', '').strip()
                cumulative_nav = cells[2].text.strip().replace('%', '').strip()
                daily_growth = cells[3].text.strip().replace('%', '').strip()
                subscription_status = cells[4].text.strip()
                redemption_status = cells[5].text.strip()
                dividend_info = cells[6].text.strip() if len(cells) > 6 else ''
                
                # Fallback: reuse the unit NAV when cumulative NAV is blank.
                if not cumulative_nav and unit_nav:
                    logger.warning(f"累计净值为空，使用单位净值作为累计净值: {unit_nav}")
                    cumulative_nav = unit_nav
                
                # Validation (after cleaning); skip malformed rows.
                if not self._is_valid_date(nav_date):
                    logger.warning(f"日期格式异常，跳过该行: {nav_date}")
                    continue
                
                if not self._is_valid_nav_value(unit_nav):
                    logger.warning(f"单位净值格式异常，跳过该行: 单位净值={unit_nav}")
                    continue
                
                if not self._is_valid_nav_value(cumulative_nav):
                    logger.warning(f"累计净值格式异常，跳过该行: 累计净值={cumulative_nav}")
                    continue
                
                nav_records.append({
                    'FSRQ': nav_date,
                    'DWJZ': unit_nav,
                    'LJJZ': cumulative_nav,
                    'JZZZL': daily_growth,
                    'SGZT': subscription_status,
                    'SHZT': redemption_status,
                    'FHSP': dividend_info
                })
            
            return nav_records, pagination_info
            
        except Exception as e:
            logger.error(f"获取基金{fund_code}净值数据失败: {e}")
            return None, {'total_records': 0, 'total_pages': 0, 'current_page': page}
    
    def parse_nav_data(self, raw_data: List[Dict]) -> List[Dict]:
        """解析净值数据"""
        nav_records = []
        
        if not raw_data:
            return nav_records
        
        for item in raw_data:
            try:
                # 解析数据
                nav_date_str = item.get('FSRQ', '').strip()
                unit_nav_str = item.get('DWJZ', '').strip()
                cumulative_nav_str = item.get('LJJZ', '').strip()
                daily_growth_str = item.get('JZZZL', '').strip()
                subscription_status = item.get('SGZT', '').strip()
                redemption_status = item.get('SHZT', '').strip()
                dividend_info = item.get('FHSP', '').strip()
                
                # 数据完整性检查
                if not nav_date_str or not unit_nav_str or not cumulative_nav_str:
                    logger.warning(f"关键数据缺失，跳过: {item}")
                    continue
                
                # 检查数据是否错位（通过检查净值字段是否包含非数值内容）
                if not self._is_valid_nav_value(unit_nav_str) or not self._is_valid_nav_value(cumulative_nav_str.replace('%', '')):
                    logger.warning(f"数据字段错位或格式异常，跳过: 单位净值={unit_nav_str}, 累计净值={cumulative_nav_str}")
                    continue
                
                # 数据清理和转换
                # 清理日期字符串（移除星号等特殊字符）
                nav_date_str = nav_date_str.rstrip('*').strip()
                
                # 清理数值字符串（移除百分号等）
                unit_nav_str = unit_nav_str.replace('%', '').strip()
                cumulative_nav_str = cumulative_nav_str.replace('%', '').strip()
                daily_growth_str = daily_growth_str.replace('%', '').strip()
                
                # 转换数据类型
                nav_date = datetime.strptime(nav_date_str, '%Y-%m-%d').date()
                unit_nav = Decimal(unit_nav_str)
                cumulative_nav = Decimal(cumulative_nav_str)
                
                # 处理日增长率
                if daily_growth_str == '--' or daily_growth_str == '' or not self._is_valid_nav_value(daily_growth_str):
                    daily_growth_rate = Decimal('0.0000')
                else:
                    daily_growth_rate = Decimal(daily_growth_str)
                
                # 处理分红送配信息
                if not dividend_info or dividend_info == '--':
                    dividend_info = None
                
                nav_record = {
                    'nav_date': nav_date,
                    'unit_nav': unit_nav,
                    'cumulative_nav': cumulative_nav,
                    'daily_growth_rate': daily_growth_rate,
                    'subscription_status': subscription_status,
                    'redemption_status': redemption_status,
                    'dividend_distribution': dividend_info,
                    'last_update_day': date.today()
                }
                
                nav_records.append(nav_record)
                
            except Exception as e:
                logger.error(f"解析净值数据失败: {e}, 数据: {item}")
                continue
        
        return nav_records
    
    def save_nav_data(self, fund_code: str, nav_records: List[Dict]) -> int:
        """Persist parsed NAV records of one fund into fund_nav_detail_new.

        Rows are upserted one by one so the running total reflects MySQL's
        per-row rowcount semantics (1 = insert, 2 = update of existing row).

        Args:
            fund_code: fund identifier the records belong to.
            nav_records: parsed NAV dicts (output shape of parse_nav_data).

        Returns:
            int: accumulated affected-row count; 0 on empty input or rollback.
        """
        if not nav_records:
            return 0

        conn = self.get_database_connection()
        cur = None
        affected = 0

        try:
            cur = conn.cursor()

            # Upsert: a duplicate (fund_code, nav_date) key refreshes the row in place.
            insert_sql = """
            INSERT INTO fund_nav_detail_new 
            (fund_code, nav_date, unit_nav, accumulated_nav, daily_growth_rate, 
             purchase_status, redemption_status, dividend_per_unit)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE
            unit_nav = VALUES(unit_nav),
            accumulated_nav = VALUES(accumulated_nav),
            daily_growth_rate = VALUES(daily_growth_rate),
            purchase_status = VALUES(purchase_status),
            redemption_status = VALUES(redemption_status),
            dividend_per_unit = VALUES(dividend_per_unit),
            updated_at = CURRENT_TIMESTAMP
            """

            # Column order expected by the VALUES clause above.
            field_order = ('nav_date', 'unit_nav', 'cumulative_nav',
                           'daily_growth_rate', 'subscription_status',
                           'redemption_status', 'dividend_distribution')

            for rec in nav_records:
                cur.execute(insert_sql,
                            (fund_code,) + tuple(rec[f] for f in field_order))
                affected += cur.rowcount

            conn.commit()

        except Exception as e:
            conn.rollback()
            logger.error(f"保存基金{fund_code}净值数据失败: {e}")
            affected = 0
        finally:
            if cur:
                cur.close()
            conn.close()

        return affected
    
    def update_scraper_status(self, fund_code: str, status: str, total_records: int = 0,
                            error_message: Optional[str] = None, increment_retry: bool = False):
        """Upsert one fund's row in fund_scraper_status.

        The row is created on first sight via INSERT ... ON DUPLICATE KEY
        UPDATE.  With increment_retry the retry counter is bumped atomically
        on the database side; otherwise it is seeded with 0 on insert and
        left untouched on update.  Failures are logged and swallowed —
        status bookkeeping is deliberately best-effort.

        Args:
            fund_code: fund identifier.
            status: data_status value to store (e.g. downloading/completed/failed).
            total_records: NAV record count to record for the fund.
            error_message: optional failure detail.
            increment_retry: bump retry_count server-side when True.
        """
        conn = self.get_database_connection()
        cur = None
        try:
            cur = conn.cursor()

            # Both statements are identical except for retry_count handling,
            # so pick the SQL first and execute once.
            if increment_retry:
                upsert_sql = """
                INSERT INTO fund_scraper_status 
                (fund_code, data_status, total_records, error_message, retry_count, last_update_date, updated_at)
                VALUES (%s, %s, %s, %s, 1, CURDATE(), CURRENT_TIMESTAMP)
                ON DUPLICATE KEY UPDATE
                data_status = VALUES(data_status),
                total_records = VALUES(total_records),
                error_message = VALUES(error_message),
                retry_count = retry_count + 1,
                last_update_date = VALUES(last_update_date),
                updated_at = VALUES(updated_at)
                """
            else:
                upsert_sql = """
                INSERT INTO fund_scraper_status 
                (fund_code, data_status, total_records, error_message, retry_count, last_update_date, updated_at)
                VALUES (%s, %s, %s, %s, 0, CURDATE(), CURRENT_TIMESTAMP)
                ON DUPLICATE KEY UPDATE
                data_status = VALUES(data_status),
                total_records = VALUES(total_records),
                error_message = VALUES(error_message),
                last_update_date = VALUES(last_update_date),
                updated_at = VALUES(updated_at)
                """

            cur.execute(upsert_sql, (fund_code, status, total_records, error_message))
            conn.commit()

        except Exception as e:
            logger.error(f"更新基金{fund_code}状态失败: {e}")
        finally:
            if cur:
                cur.close()
            conn.close()
    def get_fund_record_count(self, fund_code: str) -> int:
        """Return how many NAV rows fund_nav_detail_new holds for one fund.

        Query failures are logged and reported as 0, so callers can always
        treat the result as a plain count.
        """
        conn = self.get_database_connection()
        cur = None
        try:
            cur = conn.cursor()
            cur.execute(
                "SELECT COUNT(*) FROM fund_nav_detail_new WHERE fund_code = %s",
                (fund_code,),
            )
            row = cur.fetchone()
            return row[0] if row else 0
        except Exception as e:
            logger.error(f"查询基金{fund_code}记录数失败: {e}")
            return 0
        finally:
            if cur:
                cur.close()
            conn.close()
    
    def download_fund_nav_data(self, fund_code: str, restart: bool = False, standalone: bool = False) -> bool:
        """Download the full NAV history of one fund.

        Dispatches to the high-performance (concurrent per-page) or the
        standard (sequential per-page) implementation depending on the
        instance's enable_high_performance flag.

        Args:
            fund_code: fund identifier.
            restart: wipe previously stored rows before downloading.
            standalone: run independently of the shared is_running stop flag.

        Returns:
            bool: True when the download finished and data was stored.
        """
        impl = (self._download_fund_nav_data_high_performance
                if self.enable_high_performance
                else self._download_fund_nav_data_standard)
        return impl(fund_code, restart, standalone)
    
    def _download_fund_nav_data_high_performance(self, fund_code: str, restart: bool = False, standalone: bool = False) -> bool:
        """High-performance download of one fund's full NAV history.

        Fetches page 1 first to learn the total page count, downloads the
        remaining pages concurrently with a bounded thread pool, then
        batch-saves the combined records via _batch_save_nav_data().

        Args:
            fund_code: fund identifier.
            restart: clear previously stored rows first.
            standalone: when True, ignore the shared is_running stop flag.

        Returns:
            bool: True when data was fetched and stored; False otherwise.
        """
        logger.info(f"开始高性能下载基金{fund_code}的净值数据")
        start_time = time.time()
        
        # Mark the fund as in-progress before any network work.
        self.update_scraper_status(fund_code, 'downloading')
        
        try:
            # Re-download: wipe previously stored rows first.
            if restart:
                self.clear_fund_nav_data(fund_code)
            
            # Accumulates parsed records from all pages.
            all_records = []
            
            # Fetch page 1 to obtain pagination metadata.
            result = self.get_fund_nav_data(fund_code, 1, 20)
            if not result or not result[0]:
                logger.warning(f"基金{fund_code}无法获取数据")
                self.update_scraper_status(fund_code, 'failed', 0, "无法获取数据")
                return False
            
            first_page_data, pagination_info = result
            total_pages = pagination_info.get('total_pages', 1)
            total_records = pagination_info.get('total_records', 0)
            
            logger.info(f"基金{fund_code}总共有{total_records}条记录，分{total_pages}页")
            
            # Parse page 1 immediately; the remaining pages are fetched below.
            nav_records = self.parse_nav_data(first_page_data)
            all_records.extend(nav_records)
            
            # Concurrently download pages 2..N with a bounded thread pool.
            if total_pages > 1:
                from concurrent.futures import ThreadPoolExecutor, as_completed
                
                with ThreadPoolExecutor(max_workers=min(self.max_workers, total_pages)) as executor:
                    # Submit one task per remaining page, honoring the stop flag.
                    future_to_page = {}
                    for page in range(2, total_pages + 1):
                        if not standalone and not self.is_running:
                            break
                        future = executor.submit(self._download_single_page, fund_code, page, 20)
                        future_to_page[future] = page
                    
                    # Collect results as they complete; page order is irrelevant
                    # here because the upsert keys on (fund_code, nav_date).
                    for future in as_completed(future_to_page):
                        page = future_to_page[future]
                        try:
                            page_records = future.result()
                            if page_records:
                                all_records.extend(page_records)
                                if self.performance_stats:
                                    # NOTE(review): add_page_processed is not declared on the
                                    # PerformanceStats dataclass visible in this file —
                                    # confirm it exists or this raises AttributeError.
                                    self.performance_stats.add_page_processed()
                        except Exception as e:
                            logger.error(f"基金{fund_code}第{page}页下载失败: {e}")
            
            # Persist everything in one batched write.
            if all_records:
                saved_count = self._batch_save_nav_data(fund_code, all_records)
                
                # Re-count from the database to report the authoritative total.
                actual_count = self.get_fund_record_count(fund_code)
                
                # Mark completion with the DB-verified record count.
                self.update_scraper_status(fund_code, 'completed', actual_count)
                
                # Record timing.  NOTE(review): 'records_processed' is assigned
                # dynamically — the visible dataclass declares
                # 'total_records_processed'; confirm which field readers expect.
                if self.performance_stats:
                    self.performance_stats.total_time = time.time() - start_time
                    self.performance_stats.records_processed = len(all_records)
                
                logger.info(f"基金{fund_code}高性能下载完成，保存{saved_count}条数据，数据库中共有{actual_count}条数据，耗时{time.time() - start_time:.2f}秒")
                return True
            else:
                logger.warning(f"基金{fund_code}没有获取到任何数据")
                self.update_scraper_status(fund_code, 'completed', 0)
                return False
                
        except Exception as e:
            error_msg = f"高性能下载失败: {str(e)}"
            logger.error(f"基金{fund_code}{error_msg}")
            self.update_scraper_status(fund_code, 'failed', 0, error_msg, increment_retry=True)
            return False
    
    def _download_single_page(self, fund_code: str, page: int, per_page: int) -> List[Dict]:
        """Fetch and parse one NAV page; any failure yields an empty list.

        Args:
            fund_code: fund identifier.
            page: 1-based page number to fetch.
            per_page: page size to request.

        Returns:
            List[Dict]: parsed records, or [] when the fetch/parse failed.
        """
        try:
            fetched = self.get_fund_nav_data(fund_code, page, per_page)
            if fetched and fetched[0]:
                return self.parse_nav_data(fetched[0])
            return []
        except Exception as e:
            logger.error(f"下载基金{fund_code}第{page}页失败: {e}")
            return []
    
    def _batch_save_nav_data(self, fund_code: str, nav_records: List[Dict]) -> int:
        """Batch-save parsed NAV records (high-performance path).

        Expects records in the shape produced by parse_nav_data()
        ('nav_date', 'unit_nav', 'cumulative_nav', ...).  With the connection
        pool enabled, rows are upserted in a single executemany() round-trip
        into fund_nav_detail_new — the same table the rest of this class
        counts, clears and writes — otherwise it falls back to save_nav_data().

        Bug fix: the previous pool branch read raw API keys (FSRQ/DWJZ/...)
        and wrote a different table (fund_nav_detail); since callers pass
        parsed records, every row raised KeyError and was silently skipped.
        The schema now matches save_nav_data()/save_nav_data_batch(), and the
        cursor is closed in all paths.

        Args:
            fund_code: fund identifier.
            nav_records: parsed NAV dicts to persist.

        Returns:
            int: number of records submitted in the batch (0 on failure).
        """
        if not nav_records:
            return 0
        
        if self.enable_high_performance and self.db_pool:
            # Pooled connection for the high-throughput path.
            connection = self.db_pool.get_connection()
            cursor = None
            try:
                cursor = connection.cursor()
                
                # Same upsert used elsewhere: duplicate (fund_code, nav_date)
                # keys refresh the row in place.
                insert_sql = """
                INSERT INTO fund_nav_detail_new 
                (fund_code, nav_date, unit_nav, accumulated_nav, daily_growth_rate, 
                 purchase_status, redemption_status, dividend_per_unit)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
                ON DUPLICATE KEY UPDATE
                unit_nav = VALUES(unit_nav),
                accumulated_nav = VALUES(accumulated_nav),
                daily_growth_rate = VALUES(daily_growth_rate),
                purchase_status = VALUES(purchase_status),
                redemption_status = VALUES(redemption_status),
                dividend_per_unit = VALUES(dividend_per_unit),
                updated_at = CURRENT_TIMESTAMP
                """
                
                # Build parameter tuples; malformed records are skipped, not fatal.
                batch_data = []
                for record in nav_records:
                    try:
                        batch_data.append((
                            fund_code,
                            record['nav_date'],
                            record['unit_nav'],
                            record['cumulative_nav'],
                            record['daily_growth_rate'],
                            record['subscription_status'],
                            record['redemption_status'],
                            record['dividend_distribution'],
                        ))
                    except KeyError as e:
                        logger.warning(f"数据格式错误，跳过: {record}, 错误: {e}")
                        continue
                
                # One round-trip for the whole batch.
                if batch_data:
                    cursor.executemany(insert_sql, batch_data)
                    connection.commit()
                    
                    if self.performance_stats:
                        self.performance_stats.add_db_operation()
                    
                    logger.info(f"基金{fund_code}批量保存{len(batch_data)}条数据")
                    return len(batch_data)
                
                return 0
                
            except Exception as e:
                logger.error(f"批量保存基金{fund_code}数据失败: {e}")
                connection.rollback()
                return 0
            finally:
                if cursor:
                    cursor.close()
                self.db_pool.return_connection(connection)
        else:
            # Standard path: per-row upsert via save_nav_data().
            return self.save_nav_data(fund_code, nav_records)
    
    def get_performance_stats(self) -> Dict[str, Any]:
        """Return a snapshot of request/DB statistics as a plain dict.

        When high-performance mode is off or no stats object exists, a small
        informational dict is returned instead.
        """
        if not (self.enable_high_performance and self.performance_stats):
            return {
                'message': '高性能模式未启用',
                'enable_high_performance': self.enable_high_performance
            }

        stats = self.performance_stats
        total = stats.total_requests
        # Guard the rate computation against division by zero.
        rate = (stats.successful_requests / total * 100) if total > 0 else 0.0

        return {
            'total_requests': total,
            'successful_requests': stats.successful_requests,
            'failed_requests': stats.failed_requests,
            'success_rate': rate,
            'average_request_time': stats.avg_request_time,
            'total_time': stats.total_time,
            'records_processed': stats.total_records_processed,
            'db_operations': stats.db_operations,
            'cache_hits': stats.cache_hits
        }
    
    def _download_fund_nav_data_standard(self, fund_code: str, restart: bool = False, standalone: bool = False) -> bool:
        """Standard (sequential, page-by-page) download of one fund's NAV history.

        Pages are fetched one at a time with a 1-second pause between them;
        each page is parsed and saved before the next request.

        Args:
            fund_code: fund identifier.
            restart: clear previously stored rows first.
            standalone: when True, ignore the shared is_running stop flag.

        Returns:
            bool: True unless an exception aborted the run (an early stop via
            is_running still reports True with whatever was saved so far).
        """
        logger.info(f"开始下载基金{fund_code}的净值数据")
        
        # Mark the fund as in-progress before any network work.
        self.update_scraper_status(fund_code, 'downloading')
        
        try:
            total_saved = 0
            page = 1
            per_page = 20  # 20 per page matches what the API actually returns
            total_pages = None  # learned from the first API response
            
            # Re-download: wipe previously stored rows first.
            if restart:
                self.clear_fund_nav_data(fund_code)
            
            while True:
                # Only honor the stop flag when not running standalone.
                if not standalone and not self.is_running:
                    logger.info(f"下载被停止，基金{fund_code}")
                    break
                
                # Fetch the next page together with pagination metadata.
                result = self.get_fund_nav_data(fund_code, page, per_page)
                if not result or result[0] is None:
                    logger.info(f"基金{fund_code}第{page}页没有数据，下载完成")
                    break
                
                raw_data, pagination_info = result
                
                # Capture total page/record counts from the first response.
                if total_pages is None and pagination_info.get('total_pages', 0) > 0:
                    total_pages = pagination_info['total_pages']
                    total_records = pagination_info['total_records']
                    logger.info(f"基金{fund_code}总共有{total_records}条记录，分{total_pages}页")
                
                # Parse this page; an unparseable page is skipped, not fatal.
                nav_records = self.parse_nav_data(raw_data)
                if not nav_records:
                    logger.info(f"基金{fund_code}第{page}页解析失败，跳过")
                else:
                    # Persist the parsed page immediately.
                    saved_count = self.save_nav_data(fund_code, nav_records)
                    total_saved += saved_count
                    
                    logger.info(f"基金{fund_code}第{page}页保存{saved_count}条数据 (进度: {page}/{total_pages or '?'})")
                
                # Stop once the known last page has been processed.
                if total_pages and page >= total_pages:
                    logger.info(f"基金{fund_code}已下载完所有{total_pages}页数据")
                    break
                
                # Fallback heuristic when pagination metadata was unavailable:
                # a short page implies we reached the end.
                if total_pages is None and len(nav_records) < per_page:
                    logger.info(f"基金{fund_code}第{page}页数据不足{per_page}条，推测已获取所有数据")
                    break
                
                page += 1
                time.sleep(1)  # throttle to avoid hitting the API too fast
            
            # Re-count from the database to report the authoritative total.
            actual_count = self.get_fund_record_count(fund_code)
            
            # Mark completion with the DB-verified record count.
            self.update_scraper_status(fund_code, 'completed', actual_count)
            logger.info(f"基金{fund_code}下载完成，数据库中共有{actual_count}条数据")
            return True
            
        except Exception as e:
            error_msg = f"下载失败: {str(e)}"
            logger.error(f"基金{fund_code}{error_msg}")
            self.update_scraper_status(fund_code, 'failed', 0, error_msg, increment_retry=True)
            return False
    
    def clear_fund_nav_data(self, fund_code: str):
        """Delete all stored NAV rows of the given fund (used for restarts).

        Failures are logged and swallowed so a restart attempt never aborts
        the surrounding download.
        """
        conn = self.get_database_connection()
        cur = None
        try:
            cur = conn.cursor()
            cur.execute("DELETE FROM fund_nav_detail_new WHERE fund_code = %s", (fund_code,))
            conn.commit()
            logger.info(f"清除基金{fund_code}的旧净值数据")
        except Exception as e:
            logger.error(f"清除基金{fund_code}数据失败: {e}")
        finally:
            if cur:
                cur.close()
            conn.close()
    
    def _apply_anti_crawler_delay(self, thread_id: str):
        """Throttle requests per thread with a randomized 1-3s minimum gap.

        Remembers the timestamp of each thread's previous request and sleeps
        just long enough to reach a freshly drawn random spacing, so request
        timing looks irregular to anti-crawler defences.

        Args:
            thread_id: key identifying the calling worker in the timing map.
        """
        now = time.time()
        previous = self.last_request_time.get(thread_id)

        if previous is not None:
            required_gap = random.uniform(1.0, 3.0)  # fresh jitter each call
            elapsed = now - previous
            if elapsed < required_gap:
                time.sleep(required_gap - elapsed)

        self.last_request_time[thread_id] = time.time()
    
    def _get_random_session(self) -> requests.Session:
        """Pick a session (pooled in parallel mode) and rotate its User-Agent.

        Returns:
            requests.Session: the chosen session, with its User-Agent header
            freshly replaced by a random entry from the configured UA pool.
        """
        if self.enable_parallel and self.session_pool:
            chosen = random.choice(self.session_pool)
        else:
            chosen = self.session

        # Rotate UA on every checkout to look less like a single client.
        chosen.headers.update({'User-Agent': random.choice(self.user_agents)})
        return chosen
    
    def download_page_worker(self, fund_code: str, page: int, session: requests.Session) -> Tuple[int, List[Dict]]:
        """Thread-pool worker: fetch and parse a single NAV page.

        Args:
            fund_code: fund identifier.
            page: 1-based page number to fetch (20 rows per page).
            session: kept for interface compatibility; not used in this body —
                the request goes through get_fund_nav_data.

        Returns:
            Tuple[int, List[Dict]]: (page number, parsed records); the record
            list is empty when the page had no data or the fetch failed.
        """
        try:
            fetched = self.get_fund_nav_data(fund_code, page, 20)
            if not fetched or fetched[0] is None:
                logger.warning(f"基金{fund_code}第{page}页没有数据")
                return page, []

            raw_data, _page_info = fetched
            return page, self.parse_nav_data(raw_data)

        except Exception as e:
            logger.error(f"下载基金{fund_code}第{page}页失败: {e}")
            return page, []
    
    def download_fund_nav_data_single(self, fund_code: str, batch_save: bool = True, batch_size: int = 1000) -> Tuple[int, List[Dict]]:
        """Serially download all NAV pages of one fund.

        Args:
            fund_code: fund identifier.
            batch_save: persist records in chunks while downloading (True)
                instead of returning them to the caller.
            batch_size: chunk size for intermediate saves.

        Returns:
            Tuple[int, List[Dict]]:
              - with batch_save: (total records saved, []) — data is in the DB;
              - without: (record count, collected records).

        Bug fix: the previous version returned (0, []) whenever the record
        count was an exact multiple of batch_size, because the final return
        read the (empty) residual buffer instead of the running save counter.
        """
        try:
            thread_id = f"single_{fund_code}"
            session = self._get_random_session()
            all_data = []
            page = 1
            total_saved = 0
            
            while self.is_running:
                # Randomized per-thread throttling against the anti-crawler layer.
                self._apply_anti_crawler_delay(thread_id)
                
                # Bound concurrent outbound requests across threads.
                with self.request_semaphore:
                    result = self.get_fund_nav_data(fund_code, page)
                    if result:
                        page_data, stats = result
                    else:
                        page_data, stats = [], {'total_pages': 0}
                
                if not page_data:
                    break
                
                all_data.extend(page_data)
                
                # Flush a full chunk to the DB and keep only the remainder.
                if batch_save and len(all_data) >= batch_size:
                    saved_count = self.save_nav_data_batch(fund_code, all_data[:batch_size], batch_size)
                    total_saved += saved_count
                    all_data = all_data[batch_size:]
                
                # Stop once the reported last page has been fetched.
                total_pages = stats.get('total_pages', 1)
                if page >= total_pages:
                    break
                
                page += 1
            
            if batch_save:
                # Flush the residual buffer (if any), then report the running
                # total — never the buffer length, which may be zero even
                # after successful chunked saves.
                if all_data:
                    total_saved += self.save_nav_data_batch(fund_code, all_data, len(all_data))
                return total_saved, []
            
            # Batch save disabled: hand everything back to the caller.
            return len(all_data), all_data
            
        except Exception as e:
            logger.error(f"基金 {fund_code} 下载失败: {str(e)}")
            return 0, []
    
    def download_fund_nav_data_parallel(self, fund_code: str, restart: bool = False) -> bool:
        """Download one fund's NAV history with a per-page thread pool.

        Falls back to download_fund_nav_data() when parallel mode is off.
        Page 1 is fetched once up-front only to learn the page count; the
        pool then downloads every page, the results are merged in page order
        and batch-saved.

        Args:
            fund_code: fund identifier.
            restart: clear previously stored rows first.

        Returns:
            bool: True when the download and save completed.
        """
        if not self.enable_parallel:
            # Parallel mode disabled: delegate to the sequential implementation.
            return self.download_fund_nav_data(fund_code, restart)
        
        try:
            logger.info(f"开始并行下载基金{fund_code}的净值数据")
            
            # Re-download: wipe previously stored rows first.
            if restart:
                self.clear_fund_nav_data(fund_code)
            
            # Mark the fund as in-progress.
            self.update_scraper_status(fund_code, 'downloading')
            
            # Fetch page 1 to obtain pagination metadata.
            result = self.get_fund_nav_data(fund_code, 1)
            if not result:
                logger.error(f"无法获取基金{fund_code}的第一页数据")
                self.update_scraper_status(fund_code, 'failed', 0, "无法获取第一页数据", increment_retry=True)
                return False
            
            # NOTE(review): first_page_data is never used — the pool below
            # downloads page 1 a second time. Redundant but harmless.
            first_page_data, page_info = result
            
            total_pages = page_info.get('total_pages', 1)
            logger.info(f"基金{fund_code}总共有{total_pages}页数据")
            
            # Merged, page-ordered record list.
            all_nav_records = []
            
            # Download every page concurrently.
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                # Submit one task per page; sessions rotate round-robin.
                # NOTE(review): download_page_worker does not use its session
                # argument — requests go through get_fund_nav_data instead.
                future_to_page = {}
                for page in range(1, total_pages + 1):
                    session = self.session_pool[page % len(self.session_pool)]
                    future = executor.submit(self.download_page_worker, fund_code, page, session)
                    future_to_page[future] = page
                
                # Collect results keyed by page number as they finish.
                page_results = {}
                for future in as_completed(future_to_page):
                    page = future_to_page[future]
                    try:
                        page_num, nav_records = future.result()
                        page_results[page_num] = nav_records
                        logger.info(f"基金{fund_code}第{page_num}页下载完成，获取{len(nav_records)}条数据")
                    except Exception as e:
                        logger.error(f"基金{fund_code}第{page}页下载失败: {e}")
                        page_results[page] = []
            
            # Re-assemble the records in ascending page order.
            for page in range(1, total_pages + 1):
                if page in page_results:
                    all_nav_records.extend(page_results[page])
            
            # Persist everything in chunked executemany upserts.
            self.save_nav_data_batch(fund_code, all_nav_records)
            
            # Re-count from the database to report the authoritative total.
            actual_count = self.get_fund_record_count(fund_code)
            
            # Mark completion with the DB-verified record count.
            self.update_scraper_status(fund_code, 'completed', actual_count)
            logger.info(f"基金{fund_code}并行下载完成，数据库中共有{actual_count}条数据")
            return True
            
        except Exception as e:
            error_msg = f"并行下载失败: {str(e)}"
            logger.error(f"基金{fund_code}{error_msg}")
            self.update_scraper_status(fund_code, 'failed', 0, error_msg, increment_retry=True)
            return False
    
    def save_nav_data_batch(self, fund_code: str, nav_records: List[Dict], batch_size: int = 1000) -> int:
        """Persist NAV records in fixed-size chunks via executemany upserts.

        Each chunk gets its own connection and transaction, so one failed
        chunk is rolled back and logged without affecting the others.

        Args:
            fund_code: fund identifier.
            nav_records: parsed NAV dicts (parse_nav_data output shape).
            batch_size: rows per executemany round-trip.

        Returns:
            int: sum of affected-row counts over all chunks.
        """
        if not nav_records:
            return 0

        record_total = len(nav_records)
        total_batches = (record_total + batch_size - 1) // batch_size
        total_saved = 0

        for batch_index in range(total_batches):
            start = batch_index * batch_size
            chunk = nav_records[start:start + batch_size]
            batch_num = batch_index + 1

            conn = self.get_database_connection()
            cur = None
            saved_count = 0

            try:
                cur = conn.cursor()

                # Upsert: duplicate (fund_code, nav_date) keys refresh in place.
                insert_sql = """
                INSERT INTO fund_nav_detail_new 
                (fund_code, nav_date, unit_nav, accumulated_nav, daily_growth_rate, 
                 purchase_status, redemption_status, dividend_per_unit)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
                ON DUPLICATE KEY UPDATE
                unit_nav = VALUES(unit_nav),
                accumulated_nav = VALUES(accumulated_nav),
                daily_growth_rate = VALUES(daily_growth_rate),
                purchase_status = VALUES(purchase_status),
                redemption_status = VALUES(redemption_status),
                dividend_per_unit = VALUES(dividend_per_unit),
                updated_at = CURRENT_TIMESTAMP
                """

                rows = [(
                    fund_code,
                    rec['nav_date'],
                    rec['unit_nav'],
                    rec['cumulative_nav'],
                    rec['daily_growth_rate'],
                    rec['subscription_status'],
                    rec['redemption_status'],
                    rec['dividend_distribution'],
                ) for rec in chunk]

                # One round-trip for the whole chunk.
                cur.executemany(insert_sql, rows)
                saved_count = cur.rowcount
                conn.commit()
                total_saved += saved_count

                logger.info(f"基金{fund_code}批次{batch_num}/{total_batches}保存了{saved_count}条净值数据，累计{total_saved}条")

            except Exception as e:
                conn.rollback()
                logger.error(f"批量保存基金{fund_code}净值数据失败（批次{batch_num}）: {e}")
            finally:
                if cur:
                    cur.close()
                conn.close()

        logger.info(f"基金{fund_code}批量保存完成，总共保存{total_saved}条净值数据")
        return total_saved
    
    def start_download(self, restart: bool = False, fund_codes: Optional[List[str]] = None):
        """Kick off the NAV download for a set of funds.

        Guards against concurrent starts via is_running and always resets the
        flag when finished.  Fix: the fund count was previously logged twice
        in a row; the duplicate line was removed.

        Args:
            restart: clear each fund's previously stored data before download.
            fund_codes: explicit fund list; None means every code returned by
                get_fund_codes() (sourced from fund_info_detail).
        """
        if self.is_running:
            logger.warning("下载任务已在运行中")
            return
        
        self.is_running = True
        logger.info("开始下载基金净值数据")
        
        try:
            # Default to the full fund universe when no explicit list is given.
            if fund_codes is None:
                fund_codes = self.get_fund_codes()
            
            total_funds = len(fund_codes)
            logger.info(f"获取到{total_funds}个基金代码，准备开始下载")
            
            if self.enable_parallel:
                # Parallel mode: one pool task per fund.
                logger.info(f"启用并行模式，使用{self.max_workers}个工作线程")
                self._download_funds_parallel(fund_codes, restart)
            else:
                # Serial mode: funds processed one after another.
                logger.info("使用串行模式下载")
                self._download_funds_serial(fund_codes, restart)
            
            logger.info("所有基金数据下载完成")
            
        except Exception as e:
            logger.error(f"下载过程中发生错误: {e}")
        finally:
            # Always release the running flag, even after errors.
            self.is_running = False
    
    def _download_funds_serial(self, fund_codes: List[str], restart: bool = False):
        """Process funds one after another, honoring the is_running stop flag.

        Args:
            fund_codes: funds to process in order.
            restart: wipe each fund's stored rows before downloading.
        """
        total = len(fund_codes)

        for position, code in enumerate(fund_codes, start=1):
            if not self.is_running:
                logger.info("下载任务被停止")
                break

            logger.info(f"正在处理第{position}/{total}个基金: {code}")
            self.download_fund_nav_data(code, restart)

            # Fixed pause between funds to avoid hammering the API.
            time.sleep(2)
    
    def _download_funds_parallel(self, fund_codes: List[str], restart: bool = False):
        """Download many funds concurrently, one pool task per fund.

        Each fund is handled end to end by _download_single_fund_worker.
        RuntimeError("cannot schedule new futures after interpreter shutdown")
        is caught in both the submit and the collect phase so an interpreter
        shutdown degrades gracefully instead of crashing the pool.

        Args:
            fund_codes: funds to process.
            restart: wipe each fund's stored rows before downloading.

        Raises:
            Exception: re-raised after logging for unexpected pool failures.
        """
        total_funds = len(fund_codes)
        completed_count = 0
        
        try:
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                # Phase 1: submit one download task per fund.
                future_to_fund = {}
                for fund_code in fund_codes:
                    if not self.is_running:
                        logger.info("下载任务在提交阶段被停止")
                        break
                    
                    try:
                        future = executor.submit(self._download_single_fund_worker, fund_code, restart)
                        future_to_fund[future] = fund_code
                    except RuntimeError as e:
                        # Interpreter shutting down: stop submitting, keep what we have.
                        if "cannot schedule new futures after interpreter shutdown" in str(e):
                            logger.warning(f"解释器正在关闭，停止提交新任务: {e}")
                            break
                        else:
                            raise
                
                # Phase 2: consume results as the tasks finish.
                try:
                    for future in as_completed(future_to_fund):
                        if not self.is_running:
                            logger.info("下载任务在处理阶段被停止")
                            break
                        
                        fund_code = future_to_fund[future]
                        completed_count += 1
                        
                        try:
                            success = future.result(timeout=30)  # guard against hung workers
                            status = "成功" if success else "失败"
                            logger.info(f"基金{fund_code}下载{status} ({completed_count}/{total_funds})")
                        except Exception as e:
                            logger.error(f"基金{fund_code}下载异常: {e}")
                            
                except RuntimeError as e:
                    # Same interpreter-shutdown guard for the collection phase.
                    if "cannot schedule new futures after interpreter shutdown" in str(e):
                        logger.warning(f"解释器正在关闭，停止处理任务: {e}")
                    else:
                        raise
                        
        except Exception as e:
            logger.error(f"并行下载过程中发生错误: {e}")
            raise
    
    def _download_single_fund_worker(self, fund_code: str, restart: bool = False) -> bool:
        """Thread-pool worker: download one fund end to end and track its status.

        Applies anti-crawler pacing, flips the fund's status to downloading,
        optionally wipes old rows, downloads via the fixed parallel path and
        records the final status (completed/failed) together with the timing.

        Args:
            fund_code: fund identifier.
            restart: wipe the fund's stored rows before downloading.

        Returns:
            bool: True when at least one record was saved.
        """
        started = time.time()
        
        try:
            worker_key = f"worker_{fund_code}_{threading.current_thread().ident}"
            
            # Randomized per-thread throttling against the anti-crawler layer.
            self._apply_anti_crawler_delay(worker_key)
            
            # Mark the fund as in-progress before any network work.
            self.update_scraper_status(fund_code, 'downloading')
            
            if restart:
                self.clear_fund_nav_data(fund_code)
            
            # Download page-by-page without the shared semaphore.
            saved_count = self._download_fund_parallel_fixed(fund_code)
            elapsed_time = time.time() - started
            
            if saved_count > 0:
                self.update_scraper_status(fund_code, 'completed', saved_count)
                logger.info(f"基金{fund_code}下载完成，保存{saved_count}条数据，总耗时{elapsed_time:.2f}秒")
                return True
            
            self.update_scraper_status(fund_code, 'failed', 0, "未获取到数据")
            logger.info(f"基金{fund_code}下载失败，未获取到数据，总耗时{elapsed_time:.2f}秒")
            return False
            
        except Exception as e:
            elapsed_time = time.time() - started
            error_msg = f"下载异常: {str(e)}"
            logger.error(f"基金{fund_code}下载失败，{error_msg}，总耗时{elapsed_time:.2f}秒")
            self.update_scraper_status(fund_code, 'failed', 0, error_msg)
            return False
    
    def _download_fund_parallel_fixed(self, fund_code: str) -> int:
        """Sequentially page through one fund's NAV data, saving page by page.

        Named "fixed" because it deliberately skips the shared request
        semaphore used elsewhere, avoiding the contention issues of the
        earlier parallel implementation.  Honors the is_running stop flag.

        Args:
            fund_code: fund identifier.

        Returns:
            int: accumulated affected-row count; 0 on error.
        """
        try:
            page_no = 1
            per_page = 20
            saved_total = 0
            
            while self.is_running:
                # Call the API directly — no semaphore in this path.
                fetched = self.get_fund_nav_data(fund_code, page_no, per_page)
                if not fetched or fetched[0] is None:
                    break
                
                raw_data, paging = fetched
                parsed = self.parse_nav_data(raw_data)
                if not parsed:
                    break
                
                # Persist immediately, mirroring the serial download path.
                saved_total += self.save_nav_data(fund_code, parsed)
                
                # Stop once the reported last page has been processed.
                if page_no >= paging.get('total_pages', 1):
                    break
                page_no += 1
                
                # Jittered pause to stay under rate limits.
                time.sleep(random.uniform(0.5, 1.5))
            
            return saved_total
            
        except Exception as e:
            logger.error(f"基金{fund_code}修复版下载失败: {e}")
            return 0

    def stop_download(self):
        """Stop the running download task.

        Clears the running flag so worker loops exit at their next check,
        then sleeps briefly to give the thread pool time to wind down
        gracefully before the caller proceeds.
        """
        self.is_running = False
        logger.info("下载任务已停止")

        # Brief grace period for worker threads to observe the flag.
        # Note: `time` is already imported at module level; the previous
        # function-local `import time` was redundant and has been removed.
        time.sleep(0.5)
    
    def continue_download(self):
        """Resume downloading from the last checkpoint (断点续传).

        Queries fund_scraper_status for funds still in 'pending' or
        'failed' state and re-downloads each one, using the configured
        serial or parallel mode. A no-op if a download is already running.
        The running flag is always cleared on exit.
        """
        if self.is_running:
            logger.warning("下载任务已在运行中")
            return

        self.is_running = True
        download_mode = "并行" if self.enable_parallel else "串行"
        logger.info(f"开始断点续传下载 - 使用{download_mode}模式")

        try:
            # Fetch the codes of funds that have not completed yet.
            # Close cursor/connection in `finally` so a failed query can
            # no longer leak the connection (previously the close calls
            # only ran on the happy path).
            connection = self.get_database_connection()
            cursor = None
            try:
                cursor = connection.cursor()
                cursor.execute("""
                SELECT fund_code FROM fund_scraper_status 
                WHERE data_status IN ('pending', 'failed') 
                ORDER BY fund_code
            """)
                fund_codes = [row[0] for row in cursor.fetchall()]
            finally:
                if cursor:
                    cursor.close()
                connection.close()

            total_funds = len(fund_codes)

            if total_funds == 0:
                logger.info("所有基金已下载完成，无需继续下载")
                return

            logger.info(f"找到{total_funds}个未完成的基金，使用{download_mode}模式进行下载")

            for i, fund_code in enumerate(fund_codes, 1):
                # Honor stop_download() between funds.
                if not self.is_running:
                    logger.info("断点续传被停止")
                    break

                logger.info(f"正在处理第{i}/{total_funds}个基金: {fund_code} ({download_mode}模式)")
                if self.enable_parallel:
                    self.download_fund_nav_data_parallel(fund_code, restart=False)
                else:
                    self.download_fund_nav_data(fund_code, restart=False)

                # Pause between funds to stay polite to the upstream API.
                time.sleep(2)

            if self.is_running:
                logger.info(f"断点续传完成 - {download_mode}模式")
            else:
                logger.info(f"断点续传被中断 - {download_mode}模式")

        except Exception as e:
            logger.error(f"断点续传过程中发生错误: {e}")
        finally:
            self.is_running = False
    
    def get_download_statistics(self) -> Dict:
        """Collect aggregate download statistics.

        Returns:
            Dict with per-status fund counts, total and today's NAV record
            counts, the most recent status-update date (``YYYY-MM-DD`` or
            ``None``), and the current running flag. An empty dict is
            returned if any query fails.
        """
        connection = self.get_database_connection()
        cursor = None
        try:
            cursor = connection.cursor()

            # Fund counts grouped by download status.
            cursor.execute("""
                SELECT data_status, COUNT(*) as count 
                FROM fund_scraper_status 
                GROUP BY data_status
            """)
            status_counts = dict(cursor.fetchall())

            # Total NAV records stored so far.
            cursor.execute("SELECT COUNT(*) FROM fund_nav_detail_new")
            nav_total = cursor.fetchone()[0]

            # Most recent status-update date across all funds.
            cursor.execute("SELECT MAX(last_update_date) FROM fund_scraper_status")
            latest_update = cursor.fetchone()[0]

            # Records inserted today.
            cursor.execute("""
                SELECT COUNT(*) FROM fund_nav_detail_new 
                WHERE DATE(created_at) = CURDATE()
            """)
            records_today = cursor.fetchone()[0]

            return {
                'total_funds': sum(status_counts.values()),
                'completed_funds': status_counts.get('completed', 0),
                'downloading_funds': status_counts.get('downloading', 0),
                'pending_funds': status_counts.get('pending', 0),
                'failed_funds': status_counts.get('failed', 0),
                'total_nav_records': nav_total,
                'today_records': records_today,
                'last_update': latest_update.strftime('%Y-%m-%d') if latest_update else None,
                'is_running': self.is_running
            }

        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {}
        finally:
            if cursor:
                cursor.close()
            connection.close()
    
    def get_fund_status_list(self, page: int = 1, per_page: int = 20) -> Dict:
        """Return one page of per-fund download statuses.

        Args:
            page: 1-based page number.
            per_page: Number of rows per page.

        Returns:
            Dict with ``total_count``, ``page``, ``per_page``,
            ``total_pages`` and ``fund_list`` (each entry holding code,
            name, status, record/retry counts, last update date and error
            message). On failure the same keys are returned with empty
            values — previously the fallback omitted ``page``,
            ``per_page`` and ``total_pages``, which could KeyError callers.
        """
        connection = self.get_database_connection()
        cursor = None
        try:
            cursor = connection.cursor()

            # Total number of tracked funds (for pagination).
            cursor.execute("SELECT COUNT(*) FROM fund_scraper_status")
            total_count = cursor.fetchone()[0]

            # Page of statuses, joined with fund names where available.
            offset = (page - 1) * per_page
            cursor.execute("""
                SELECT fs.fund_code, fi.fund_name, fs.data_status, 
                       fs.total_records, fs.retry_count, fs.last_update_date,
                       fs.error_message
                FROM fund_scraper_status fs
                LEFT JOIN fund_info_detail fi ON fs.fund_code = fi.fund_code
                ORDER BY fs.last_update_date DESC, fs.fund_code
                LIMIT %s OFFSET %s
            """, (per_page, offset))

            results = cursor.fetchall()

            fund_list = []
            for row in results:
                fund_info = {
                    'fund_code': row[0],
                    'fund_name': row[1] or '未知',
                    'data_status': row[2],
                    'total_records': row[3] or 0,
                    'retry_count': row[4] or 0,
                    'last_update_date': row[5].strftime('%Y-%m-%d') if row[5] else None,
                    'error_message': row[6]
                }
                fund_list.append(fund_info)

            return {
                'total_count': total_count,
                'page': page,
                'per_page': per_page,
                'total_pages': (total_count + per_page - 1) // per_page,
                'fund_list': fund_list
            }

        except Exception as e:
            logger.error(f"获取基金状态列表失败: {e}")
            # Keep the same shape as the success path so callers never
            # KeyError on pagination fields.
            return {
                'total_count': 0,
                'page': page,
                'per_page': per_page,
                'total_pages': 0,
                'fund_list': []
            }
        finally:
            if cursor:
                cursor.close()
            connection.close()

if __name__ == "__main__":
    # Manual smoke test for the downloader: fetch one fund, then print
    # the aggregate statistics on success.
    scraper = FundNavDetailScraper()

    test_fund_code = "000001"
    if scraper.download_fund_nav_data(test_fund_code):
        print(f"基金{test_fund_code}下载成功")

        # Dump the download statistics one key per line.
        print("下载统计:")
        for key, value in scraper.get_download_statistics().items():
            print(f"  {key}: {value}")
    else:
        print(f"基金{test_fund_code}下载失败")