#  -*- coding: utf-8 -*-

"""
普量学院量化投资课程系列案例源码包
普量学院版权所有
仅用于教学目的，严禁转发和用于盈利目的，违者必究
©Plouto-Quants All Rights Reserved

普量学院助教微信：niuxiaomi3
"""

from pymongo import UpdateOne
from database import DB_CONN
import tushare as ts
from datetime import datetime
import pandas as pd
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock
from collections import deque

"""
从tushare pro获取日K数据，保存到本地的MongoDB数据库中
多线程优化版本：使用线程池并行处理，提供详细的性能统计
"""

class RateLimiter:
    """Thread-safe sliding-window limiter for API call frequency.

    Keeps a deque of call timestamps from the last 60 seconds and
    throttles callers progressively as the per-minute budget fills up.
    """

    def __init__(self, max_calls_per_minute=800):
        """
        :param max_calls_per_minute: hard cap on calls in any 60s window
        """
        self.max_calls_per_minute = max_calls_per_minute
        self.calls = deque()        # timestamps of calls within the last minute
        self.lock = Lock()          # guards self.calls
        self.last_wait_time = 0     # kept for backward compatibility

    def wait_if_needed(self):
        """Block until another API call is allowed, then record the call.

        BUG FIX: the original recursed into itself (``return
        self.wait_if_needed()``) while still holding the non-reentrant
        ``threading.Lock``; the nested ``with self.lock`` deadlocked the
        first time the hard limit was reached.  Rewritten as a loop that
        stays inside a single lock acquisition.
        """
        with self.lock:
            while True:
                now = time.time()
                # Drop call records older than the 60-second window.
                while self.calls and now - self.calls[0] > 60:
                    self.calls.popleft()

                current_calls = len(self.calls)

                if current_calls >= self.max_calls_per_minute:
                    # Hard limit reached: sleep until the oldest call
                    # leaves the window, then re-evaluate.
                    wait_time = 60 - (now - self.calls[0]) + 0.1
                    if wait_time > 0:
                        print(f"⚠️  API频率限制，等待 {wait_time:.2f} 秒...")
                        time.sleep(wait_time)
                        continue
                elif current_calls >= self.max_calls_per_minute * 0.8:
                    # Soft limit (80% of budget): brief proportional back-off.
                    wait_time = 0.2 * (current_calls / self.max_calls_per_minute)
                    if wait_time > 0.02:  # not worth sleeping below 20ms
                        time.sleep(wait_time)
                break

            # Record this call with a fresh timestamp (the original stored
            # the pre-sleep time, which could immediately re-trigger the
            # limit after a long wait).
            self.calls.append(time.time())


class DailyCrawler:
    """Crawl daily K-line data from tushare pro into local MongoDB.

    The stock crawl fans batches of codes out to a thread pool; a shared
    RateLimiter keeps the aggregate tushare call rate under quota, and
    per-phase timings (API / processing / saving) are accumulated across
    worker threads for a final performance report.
    """

    def __init__(self, token=None, max_workers=10):
        """
        Initialize - tushare pro API only.

        :param token: tushare pro token; when None it is read from the
                      TUSHARE_TOKEN environment variable
        :param max_workers: worker-thread count for the batch crawl
        :raises ValueError: if no token is available or pro init fails
        """

        # Resolve the token (explicit argument first, then environment).
        if token is None:
            token = os.getenv('TUSHARE_TOKEN')

        if not token:
            raise ValueError("必须设置tushare pro token! 请访问 https://tushare.pro/ 获取token")

        try:
            ts.set_token(token)
            self.pro = ts.pro_api()
            print("✅ 使用tushare pro API初始化成功")
        except Exception as e:
            raise ValueError(f"tushare pro初始化失败: {e}")

        # MongoDB collections: unadjusted and hfq (back-adjusted) prices.
        self.daily = DB_CONN['daily']
        self.daily_hfq = DB_CONN['daily_hfq']

        # Thread pool used by crawl() for batch processing.
        self.max_workers = max_workers
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

        # Performance counters shared by all worker threads.
        # BUG FIX: the original mutated these with bare `+=` from several
        # threads at once; `+=` is a read-modify-write, so concurrent
        # updates could be silently lost.  All mutation now goes through
        # _record_stats() under _stats_lock.
        self._stats_lock = Lock()
        self.total_api_time = 0
        self.total_process_time = 0
        self.total_save_time = 0
        self.api_call_count = 0
        self.success_count = 0
        self.error_count = 0

        # Shared API-rate limiter (the tushare quota is per-account,
        # not per-thread).
        self.rate_limiter = RateLimiter(max_calls_per_minute=800)

        print(f"🚦 API调用频率限制：每分钟最多 800 次")

    def _record_stats(self, api_time=0.0, process_time=0.0, save_time=0.0,
                      api_calls=0, successes=0, errors=0):
        """Thread-safely accumulate the shared performance counters."""
        with self._stats_lock:
            self.total_api_time += api_time
            self.total_process_time += process_time
            self.total_save_time += save_time
            self.api_call_count += api_calls
            self.success_count += successes
            self.error_count += errors

    def _call_api_with_rate_limit(self, api_func, *args, **kwargs):
        """Invoke ``api_func`` after passing through the rate limiter."""
        self.rate_limiter.wait_if_needed()
        return api_func(*args, **kwargs)

    def crawl_index(self, begin_date=None, end_date=None):
        """
        Crawl daily K-line data for a fixed list of market indexes.

        :param begin_date: start date, format YYYY-MM-DD (None = today)
        :param end_date: end date, format YYYY-MM-DD (None = today)
        """

        print("🚀 开始抓取指数数据...")
        start_time = time.time()

        self._reset_stats()

        # Index universe: SSE composite, CSI 300, SZSE component,
        # SME board index and ChiNext.
        index_codes = [
            '000001.SH',  # 上证指数
            '000300.SH',  # 沪深300
            '399001.SZ',  # 深证成指
            '399005.SZ',  # 中小板指
            '399006.SZ'   # 创业板指
        ]

        # tushare pro expects dates as YYYYMMDD.
        start_date, end_date_pro = self._convert_dates(begin_date, end_date)

        print(f"指数列表：{len(index_codes)}个")
        print(f"日期范围：{begin_date or '当前日期'} 到 {end_date or '当前日期'}")

        for ts_code in index_codes:
            try:
                api_start = time.time()
                df_daily = self._call_api_with_rate_limit(
                    self.pro.index_daily,
                    ts_code=ts_code,
                    start_date=start_date,
                    end_date=end_date_pro
                )
                api_time = time.time() - api_start

                process_start = time.time()
                # Convert to the local schema and derive the 6-digit
                # code, e.g. '000001' from '000001.SH'.
                df_daily = self._convert_pro_data_format(df_daily, is_index=True)
                code = ts_code[:6]
                process_time = time.time() - process_start

                save_start = time.time()
                self.save_data(code, df_daily, self.daily, {'index': True})
                save_time = time.time() - save_start

                self._record_stats(api_time=api_time, process_time=process_time,
                                   save_time=save_time, api_calls=1, successes=1)

                print(f'指数 {ts_code}: API {api_time:.2f}s, 处理 {process_time:.2f}s, 保存 {save_time:.2f}s, 数据 {len(df_daily)}条')

            except Exception as e:
                print(f'❌ 抓取指数 {ts_code} 数据时出错: {e}')
                self._record_stats(errors=1)
                continue

        total_time = time.time() - start_time
        self._print_performance_stats("指数数据", total_time)

    def crawl(self, begin_date=None, end_date=None, limit=None, batch_size=50):
        """
        Crawl daily K-line data for all listed stocks (multi-threaded).

        :param begin_date: start date, format YYYY-MM-DD (None = today)
        :param end_date: end date, format YYYY-MM-DD (None = today)
        :param limit: optional cap on the stock count (for testing)
        :param batch_size: stocks handled per worker task
        """

        print("🚀 开始抓取股票数据...")
        start_time = time.time()

        self._reset_stats()

        try:
            print("📋 获取股票列表...")
            stock_basic = self._call_api_with_rate_limit(
                self.pro.stock_basic,
                exchange='',
                list_status='L',  # listed stocks only
                fields='ts_code,symbol,name,area,industry,list_date'
            )

            stock_codes = stock_basic['ts_code'].tolist()

            # Optionally trim the universe for quick test runs.
            if limit:
                stock_codes = stock_codes[:limit]

            print(f"📊 股票总数：{len(stock_codes)}只")
            print(f"📅 日期范围：{begin_date or '当前日期'} 到 {end_date or '当前日期'}")
            print(f"🧵 线程数：{self.max_workers}")

        except Exception as e:
            print(f'❌ 获取股票列表时出错: {e}')
            return

        start_date, end_date_pro = self._convert_dates(begin_date, end_date)

        # Split the universe into fixed-size batches, one pool task each.
        batches = [stock_codes[i:i + batch_size] for i in range(0, len(stock_codes), batch_size)]

        futures = [
            self.executor.submit(self._process_stock_batch, batch, start_date, end_date_pro)
            for batch in batches
        ]

        completed = 0
        total_batches = len(batches)

        for future in as_completed(futures):
            try:
                future.result()
                completed += 1

                # Progress report every 10 completed batches and at the end.
                if completed % 10 == 0 or completed == total_batches:
                    elapsed = time.time() - start_time
                    avg_time = elapsed / completed
                    remaining = (total_batches - completed) * avg_time

                    # Observed API call rate (calls per minute).
                    api_rate = self.api_call_count / (elapsed / 60) if elapsed > 0 else 0

                    print(f'📦 进度: {completed}/{total_batches} ({completed/total_batches*100:.1f}%), '
                         f'API速率: {api_rate:.1f}/分钟, 预计剩余: {remaining/60:.1f}分钟')

            except Exception as e:
                print(f"❌ 处理批次时出错: {e}")

        total_time = time.time() - start_time
        self._print_performance_stats("股票数据", total_time)

    def _process_stock_batch(self, stock_codes, start_date, end_date_pro):
        """Fetch, convert and bulk-upsert one batch of stocks.

        Runs on a pool worker thread; all shared-counter updates go
        through _record_stats(), which takes the stats lock.
        """
        all_normal_requests = []
        all_hfq_requests = []

        for ts_code in stock_codes:
            try:
                code = ts_code[:6]

                # Two API calls per stock: unadjusted and hfq-adjusted.
                api_start = time.time()
                df_daily = self._call_api_with_rate_limit(
                    self.pro.daily,
                    ts_code=ts_code,
                    start_date=start_date,
                    end_date=end_date_pro
                )
                df_daily_hfq = self._call_api_with_rate_limit(
                    self.pro.daily,
                    ts_code=ts_code,
                    start_date=start_date,
                    end_date=end_date_pro,
                    adj='hfq'
                )
                api_time = time.time() - api_start

                process_start = time.time()
                df_daily = self._convert_pro_data_format(df_daily, is_index=False)
                df_daily_hfq = self._convert_pro_data_format(df_daily_hfq, is_index=False)
                process_time = time.time() - process_start

                # Accumulate upserts; written once per batch below.
                all_normal_requests.extend(
                    self._build_upsert_requests(df_daily, code, {'index': False}))
                all_hfq_requests.extend(
                    self._build_upsert_requests(df_daily_hfq, code, {'index': False}))

                self._record_stats(api_time=api_time, process_time=process_time,
                                   api_calls=2, successes=1)

            except Exception as e:
                print(f'❌ 处理股票 {ts_code} 时出错: {e}')
                self._record_stats(errors=1)
                continue

        # One bulk write per collection for the whole batch.
        save_start = time.time()
        if all_normal_requests:
            try:
                result = self.daily.bulk_write(all_normal_requests, ordered=False)
                print(f'💾 批量保存不复权数据: 插入 {result.upserted_count}条, 更新 {result.modified_count}条')
            except Exception as e:
                print(f'❌ 批量保存不复权数据时出错: {e}')

        if all_hfq_requests:
            try:
                result = self.daily_hfq.bulk_write(all_hfq_requests, ordered=False)
                print(f'💾 批量保存复权数据: 插入 {result.upserted_count}条, 更新 {result.modified_count}条')
            except Exception as e:
                print(f'❌ 批量保存复权数据时出错: {e}')

        save_time = time.time() - save_start
        self._record_stats(save_time=save_time)

    def _build_upsert_requests(self, df, code, extra_fields=None):
        """Turn each DataFrame row into an UpdateOne upsert keyed on
        (code, date, index).

        :param df: converted K-line DataFrame ('date' and 'index' columns)
        :param code: 6-digit stock/index code
        :param extra_fields: optional extra document fields (may override
                             converted columns, e.g. the 'index' flag)
        :return: list of pymongo UpdateOne operations
        """
        requests = []
        for df_index in df.index:
            doc = dict(df.loc[df_index])
            doc['code'] = code
            if extra_fields is not None:
                doc.update(extra_fields)
            requests.append(
                UpdateOne(
                    {'code': doc['code'], 'date': doc['date'], 'index': doc['index']},
                    {'$set': doc},
                    upsert=True
                )
            )
        return requests

    def save_data(self, code, df_daily, collection, extra_fields=None):
        """
        Bulk-upsert one DataFrame into a MongoDB collection.

        :param code: stock/index code (6 digits)
        :param df_daily: DataFrame with converted daily K-line data
        :param collection: target MongoDB collection
        :param extra_fields: extra fields merged into every document
        """

        if df_daily.empty:
            return

        update_requests = self._build_upsert_requests(df_daily, code, extra_fields)

        if update_requests:
            try:
                # ordered=False lets the server process writes in parallel.
                update_result = collection.bulk_write(update_requests, ordered=False)
                # Only log when something actually changed (less noise).
                if update_result.upserted_count > 0 or update_result.modified_count > 0:
                    print('💾 保存数据，代码: %s, 插入: %4d条, 更新: %4d条' %
                          (code, update_result.upserted_count, update_result.modified_count))
            except Exception as e:
                print(f'❌ 保存数据时出错，代码: {code}, 错误: {e}')

    def _convert_pro_data_format(self, df, is_index=False):
        """
        Convert a tushare pro result frame to the local storage schema.

        :param df: raw DataFrame as returned by tushare pro
        :param is_index: True when the data belongs to a market index
        :return: DataFrame with date/open/high/low/close/volume/index cols
        """
        if df.empty:
            return df

        # Build the target frame in one shot instead of column-by-column.
        return pd.DataFrame({
            'date': pd.to_datetime(df['trade_date']).dt.strftime('%Y-%m-%d'),
            'open': df['open'],
            'high': df['high'],
            'low': df['low'],
            'close': df['close'],
            'volume': df['vol'],  # tushare pro names the volume column 'vol'
            'index': is_index
        })

    def _convert_dates(self, begin_date, end_date):
        """
        Convert YYYY-MM-DD dates (or None = today) to tushare's YYYYMMDD.

        :return: (start_date, end_date) as YYYYMMDD strings
        """
        now = datetime.now().strftime('%Y%m%d')
        start_date = now if begin_date is None else begin_date.replace('-', '')
        end_date_pro = now if end_date is None else end_date.replace('-', '')
        return start_date, end_date_pro

    def _reset_stats(self):
        """Zero all performance counters (thread-safe)."""
        with self._stats_lock:
            self.total_api_time = 0
            self.total_process_time = 0
            self.total_save_time = 0
            self.api_call_count = 0
            self.success_count = 0
            self.error_count = 0

    def _print_performance_stats(self, data_type, total_time):
        """Print the timing/success report for one finished crawl run."""
        # Guard against ZeroDivisionError on an instantly-failing run.
        safe_total = total_time if total_time > 0 else 1e-9

        print(f"\n{'='*50}")
        print(f"🏆 {data_type}抓取性能统计")
        print(f"{'='*50}")
        print(f"⏱️  总耗时: {total_time:.2f}秒")
        print(f"🔌 API调用总耗时: {self.total_api_time:.2f}秒 ({self.total_api_time/safe_total*100:.1f}%)")
        print(f"⚙️  数据处理总耗时: {self.total_process_time:.2f}秒 ({self.total_process_time/safe_total*100:.1f}%)")
        print(f"💾 数据保存总耗时: {self.total_save_time:.2f}秒 ({self.total_save_time/safe_total*100:.1f}%)")
        print(f"📊 API调用次数: {self.api_call_count}次")
        # BUG FIX: the original used `print(...) if cond else "..."`, so the
        # zero-call fallback string was evaluated but never printed.
        if self.api_call_count > 0:
            print(f"📈 平均API调用时间: {self.total_api_time/self.api_call_count:.2f}秒/次")
        else:
            print("📈 平均API调用时间: 0秒/次")
        print(f"✅ 成功处理: {self.success_count}只")
        print(f"❌ 失败: {self.error_count}只")

        if self.success_count > 0:
            print(f"📊 平均每只股票处理时间: {total_time/self.success_count:.2f}秒")
            print(f"📈 成功率: {self.success_count/(self.success_count+self.error_count)*100:.1f}%")

        other_time = total_time - self.total_api_time - self.total_process_time - self.total_save_time
        print(f"🔄 其他耗时: {other_time:.2f}秒 ({other_time/safe_total*100:.1f}%)")
        print(f"{'='*50}\n")

    def __del__(self):
        """Best-effort shutdown of the worker pool when GC'd."""
        if hasattr(self, 'executor'):
            self.executor.shutdown(wait=True)





# 程序入口
if __name__ == '__main__':
    # SECURITY FIX: the tushare pro token was hard-coded in source — a
    # leaked credential.  Read it from the environment instead;
    # DailyCrawler.__init__ validates it and raises a clear error with
    # instructions when it is missing.
    TOKEN = os.getenv('TUSHARE_TOKEN')

    # 创建爬虫实例 - 设置3个线程，避免频率限制
    dc = DailyCrawler(token=TOKEN, max_workers=3)

    print("🚀 开始数据抓取...")

    # 抓取指数数据
    print("\n📊 抓取指数数据...")
    dc.crawl_index('2015-01-01', '2021-01-01')

    # 抓取股票数据 - 多线程版本
    print("\n📈 抓取股票数据...")
    # 使用多线程批量抓取，每批15只股票，避免频率限制
    #dc.crawl('2015-01-01', '2020-01-01', batch_size=15)

    print("✅ 数据抓取完成!")
