# 主力排名数据爬虫
# 专门用于抓取主力排名数据

import requests
import time
import random
import json
import re
import sys
import os
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlencode
from storage import DataStorage

def get_current_path():
    """Return the directory holding this program.

    Works both for normal script execution and for a PyInstaller bundle
    (detected via the `sys._MEIPASS` attribute), where the location of the
    frozen executable is used instead of the source file.
    """
    anchor = sys.executable if hasattr(sys, '_MEIPASS') else __file__
    return os.path.dirname(os.path.abspath(anchor))

# 尝试导入配置，如果失败则使用默认值
# Try to import the project configuration; fall back to built-in defaults
# when config.py is unavailable (e.g. when running as a standalone bundle).
try:
    from config import REQUEST_TIMEOUT, RETRY_COUNT, DELAY_TIME, HEADERS, MAIN_FORCE_CONFIG, DEBUG_MODE
except ImportError:
    print("警告：无法导入config模块，使用默认配置")
    # Default configuration values
    REQUEST_TIMEOUT = 10  # seconds per HTTP request
    RETRY_COUNT = 3  # attempts per request
    DELAY_TIME = 2  # base delay between requests, seconds
    DEBUG_MODE = False  # debug mode disabled by default
    # Browser-like headers to reduce the chance of being blocked
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Connection': 'keep-alive',
    }
    # Sina Finance "main force" endpoint plus parsing hints
    MAIN_FORCE_CONFIG = {
        'base_url': 'https://finance.sina.com.cn',
        'main_force_url': 'https://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_ssggzj',
        'encoding': 'gb2312',
        'target_table_keywords': ['主力', '排名', '资金', '净流入', '净流出', '主力资金'],
        'expected_columns': ['排名', '股票代码', '股票名称', '主力净流入', '涨跌幅', '换手率', '成交额']
    }

class MainForceRankCrawler:
    """Crawler for "main force" (institutional money-flow) ranking data.

    Fetches one page of money-flow ranking records from the Sina Finance
    JSON API, normalizes the field names, and hands each record to
    DataStorage for persistence.
    """

    # Mapping from Sina API field names to our standard names.
    # Hoisted to a class constant so it is built once, not per record.
    _FIELD_MAPPING = {
        'symbol': 'stock_code',              # stock code
        'name': 'stock_name',                # stock name
        'trade': 'current_price',            # current price
        'changeratio': 'price_change_rate',  # price change rate
        'turnover': 'turnover_rate',         # turnover rate
        'amount': 'turnover_amount',         # turnover amount
        'netamount': 'net_inflow',           # net inflow
        'r0_net': 'main_force_net_inflow',   # main-force net inflow
        'r0_ratio': 'main_force_ratio',      # main-force ratio
        'rank': 'rank'                       # rank
    }

    def __init__(self):
        self.session = requests.Session()
        self.session.headers.update(HEADERS)
        self.storage = DataStorage()
        self.config = MAIN_FORCE_CONFIG

    def _make_request(self, url, max_retries=None, params=None):
        """Send a GET request with retries and a small random anti-bot delay.

        Args:
            url: Target URL.
            max_retries: Number of attempts; defaults to RETRY_COUNT. Resolved
                at call time (not at class-definition time) so later config
                changes take effect.
            params: Optional dict of query-string parameters.

        Returns:
            The requests.Response on success, or None if every attempt fails.
        """
        if max_retries is None:
            max_retries = RETRY_COUNT
        for attempt in range(max_retries):
            try:
                # Random delay to reduce the chance of triggering anti-crawl limits.
                time.sleep(random.uniform(0.5, 1.5))

                # requests ignores params=None, so one call covers both cases.
                response = self.session.get(url, params=params, timeout=REQUEST_TIMEOUT)
                response.raise_for_status()

                # Guard against empty/placeholder bodies that still return 200.
                if response.status_code == 200 and len(response.content) > 10:
                    return response
                print(f"响应内容异常，状态码: {response.status_code}, 内容长度: {len(response.content)}")
            except requests.exceptions.RequestException as e:
                print(f"请求失败（尝试 {attempt + 1}/{max_retries}）：{e}")
                if attempt < max_retries - 1:
                    time.sleep(2)  # back off before retrying
        return None

    def _parse_json_data(self, response_text):
        """Parse the ranking payload out of the raw API response text.

        The Sina API may wrap the JSON array in a JSONP-style "(...)" shell,
        possibly with surrounding whitespace or a trailing ';'; both are
        stripped before the wrapper check so stray characters do not defeat it.

        Returns:
            A list of raw record dicts, or [] on any parse problem.
        """
        try:
            text = response_text.strip()
            if text.endswith(';'):
                text = text[:-1].rstrip()
            if text.startswith('(') and text.endswith(')'):
                # Drop the JSONP wrapper
                text = text[1:-1]

            data = json.loads(text)
            if isinstance(data, list):
                return data
            print("返回数据格式不是预期的列表格式")
            return []
        except json.JSONDecodeError as e:
            print(f"JSON解析失败: {e}")
            print(f"响应内容: {response_text[:500]}...")
            return []

    def _standardize_main_force_data(self, raw_data):
        """Rename known Sina API fields to standard names.

        Unknown fields are passed through under their original names so no
        information from the API is lost.
        """
        mapping = self._FIELD_MAPPING
        return {mapping.get(key, key): value for key, value in raw_data.items()}

    def _save_debug_response(self, text):
        """Best-effort dump of the raw API response next to the program (debug only)."""
        try:
            response_file = os.path.join(get_current_path(), 'main_force_response.json')
            with open(response_file, 'w', encoding='utf-8') as f:
                f.write(text)
            print(f"[调试] API响应已保存到 {response_file}")
        except Exception as e:
            # Debug dump is non-essential; report and carry on.
            print(f"[调试] 保存API响应文件失败: {e}")

    def crawl_main_force_data(self):
        """Fetch, parse, standardize and store one page of ranking data.

        Returns:
            Number of records successfully handed to storage (0 on failure).
        """
        target_url = self.config['main_force_url']
        print(f"开始爬取主力排名数据: {target_url}")

        # Query parameters understood by the Sina money-flow endpoint.
        params = {
            'sort': 'netamount',  # sort by net inflow
            'asc': '0',           # descending
            'page': '1',          # first page
            'num': '50',          # 50 records
            'daima': 'bk'         # sector code
        }

        response = self._make_request(target_url, params=params)
        # Explicit None check: Response truthiness is status-based and not
        # the sentinel contract _make_request actually uses.
        if response is None:
            print("无法获取API数据")
            return 0

        # Apply the encoding declared in config (endpoint serves gb2312).
        if self.config.get('encoding'):
            response.encoding = self.config['encoding']

        # Only persist the raw response when debugging.
        if DEBUG_MODE:
            self._save_debug_response(response.text)

        main_force_data = self._parse_json_data(response.text)
        if not main_force_data:
            print("未解析到有效的主力排名数据")
            return 0

        saved_count = 0
        # One timestamp per batch so all records of a crawl agree.
        crawl_time = time.strftime('%Y-%m-%d %H:%M:%S')
        for i, item in enumerate(main_force_data, 1):
            record = self._standardize_main_force_data(item)
            # Rank is the position in the (already sorted) API response.
            record['rank'] = i
            record['crawl_time'] = crawl_time
            record['source_url'] = target_url
            if self.storage.add_data(record):
                saved_count += 1

        print(f"主力排名数据爬取完成，获取 {len(main_force_data)} 条记录，保存 {saved_count} 条")
        return saved_count

    def run(self):
        """Run the full crawl task and persist the results.

        Returns:
            Number of records stored during this run.
        """
        print("开始主力排名数据爬取任务...")
        print(f"目标URL: {self.config['main_force_url']}")

        saved_count = self.crawl_main_force_data()

        if saved_count > 0:
            # Default filename already carries a timestamp (handled by storage).
            if not self.storage.save_data():
                print("数据保存失败")
        else:
            print("没有获取到任何主力排名数据")

        return saved_count

def main():
    """Entry point: construct the crawler and execute a full run."""
    MainForceRankCrawler().run()


if __name__ == "__main__":
    main()