import json
import tushare as ts
from datetime import datetime
import pandas as pd
import time
import random
import os

class ConceptMemberExporter:
    """Fetch THS concept-board membership data from Tushare and export it
    as JSON Lines records shaped for uniCloud (Mongo extended-JSON) import."""

    def __init__(self):
        # Tushare client setup. The token is read from the environment when
        # available so the credential does not have to live in source; the
        # literal fallback keeps existing deployments working unchanged.
        # NOTE(review): a credential committed to source control should be
        # rotated and removed.
        token = os.environ.get(
            'TUSHARE_TOKEN',
            '20241126212238-f4c9b74f-0059-4a93-9d1f-53dae167179a',
        )
        self.pro = ts.pro_api(token)
        # Route requests through the mirror endpoint instead of the default.
        self.pro._DataApi__http_url = 'http://tsapi.majors.ltd:7000'
        self.retry_times = 3      # maximum number of attempts per API call
        self.base_sleep_time = 1  # base delay (seconds) before each attempt

    def api_call_with_retry(self, func, *args, **kwargs):
        """Call ``func(*args, **kwargs)`` with up to ``self.retry_times`` attempts.

        A small randomized delay precedes every attempt to avoid bursts of
        concurrent access. Errors mentioning "token" (rate/credential limits)
        get an extra, linearly growing backoff. The failure from the final
        attempt is re-raised to the caller.
        """
        for attempt in range(self.retry_times):
            try:
                # Randomized delay to spread out concurrent access.
                time.sleep(self.base_sleep_time + random.random())
                return func(*args, **kwargs)
            except Exception as e:
                print(f"第 {attempt+1} 次调用失败: {str(e)}")
                if "token" in str(e).lower() and attempt < self.retry_times - 1:
                    # Token/rate-limit error: back off longer before retrying.
                    wait_time = (attempt + 1) * 2
                    print(f"等待 {wait_time} 秒后重试...")
                    time.sleep(wait_time)
                elif attempt == self.retry_times - 1:
                    # Bare raise preserves the original traceback
                    # (was ``raise e``, which resets it).
                    raise

    def fetch_concept_codes(self):
        """Return the ``ts_code`` list of all THS concept boards (type='N').

        Returns an empty list when the API yields no data or raises.
        """
        try:
            print("开始获取概念板块代码...")
            df = self.api_call_with_retry(self.pro.ths_index, type='N')
            if df is not None and not df.empty:
                print(f"成功获取到 {len(df)} 个概念板块代码")
                return df['ts_code'].tolist()
            return []
        except Exception as e:
            print(f"获取概念板块代码失败: {str(e)}")
            return []

    def fetch_concept_members(self, concept_code):
        """Return the member-stock DataFrame for one concept board.

        Returns None on failure so the caller can skip the board.
        """
        try:
            print(f"正在获取概念 {concept_code} 的成分股数据...")
            return self.api_call_with_retry(self.pro.ths_member, ts_code=concept_code)
        except Exception as e:
            print(f"获取概念 {concept_code} 的成分股数据失败: {str(e)}")
            return None

    def process_for_unicloud(self, df, concept_code):
        """Convert a member DataFrame into a list of uniCloud-style dicts.

        Each record links ``concept_code`` to one stock and carries Mongo
        extended-JSON ``{"$date": ...}`` timestamps. ``weight`` and the
        in/out dates are optional: emitted only when present and non-null.
        Date fields are expected as 8-char YYYYMMDD strings; anything else
        is silently skipped.
        """
        records = []
        current_time = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")

        for _, row in df.iterrows():
            # Base record: always-present fields.
            record = {
                "concept_code": concept_code,     # concept board code
                "stock_code": row['code'],        # member stock code
                "stock_name": row['name'],        # member stock name
                "is_new": "Y",                    # default membership flag
                "create_time": {"$date": current_time},
                "update_time": {"$date": current_time},
            }

            # Optional numeric weight.
            if 'weight' in row and pd.notna(row['weight']):
                record['weight'] = float(row['weight'])

            # Optional YYYYMMDD date fields -> extended-JSON dates.
            for field in ('in_date', 'out_date'):
                if field in row and pd.notna(row[field]):
                    date_str = str(row[field])
                    if len(date_str) == 8:
                        formatted_date = (
                            f"{date_str[:4]}-{date_str[4:6]}-{date_str[6:]}T00:00:00Z"
                        )
                        record[field] = {"$date": formatted_date}

            records.append(record)

        return records

    def run(self):
        """Full export pipeline: list all concept boards, fetch each board's
        members, convert them, and write one JSONL file at the end."""
        concept_codes = self.fetch_concept_codes()

        if not concept_codes:
            print("未获取到任何概念板块代码")
            return

        print(f"共获取到 {len(concept_codes)} 个概念板块")

        all_records = []
        for i, concept_code in enumerate(concept_codes, 1):
            print(f"正在处理第 {i}/{len(concept_codes)} 个概念板块: {concept_code}")

            df = self.fetch_concept_members(concept_code)
            if df is not None and not df.empty:
                records = self.process_for_unicloud(df, concept_code)
                all_records.extend(records)
                print(f"成功处理 {len(records)} 条成分股数据")
            else:
                print(f"概念板块 {concept_code} 未获取到数据")

            # Randomized pause between boards to avoid rate limiting.
            time.sleep(self.base_sleep_time + random.random())

        if all_records:
            print("\n开始导出所有数据...")
            self.export_to_jsonl(all_records)
            print(f"共导出 {len(all_records)} 条成分股关系记录")
        else:
            print("没有数据需要导出")

        print("导出完成")

    def export_to_jsonl(self, all_records, filename='stock_concept_member_data.json'):
        """Write records as JSON Lines (one JSON object per line) to
        ``filename``, reporting progress every 1000 records and the final
        file size in MB. Errors are caught and logged, not raised."""
        try:
            record_count = len(all_records)
            print(f"准备导出 {record_count} 条记录...")

            with open(filename, 'w', encoding='utf-8') as f:
                for i, record in enumerate(all_records, 1):
                    f.write(json.dumps(record, ensure_ascii=False) + '\n')
                    if i % 1000 == 0:
                        print(f"已处理 {i}/{record_count} 条记录...")

            # BUGFIX: previously printed the literal placeholder "(unknown)"
            # instead of the actual output filename.
            print(f"成功导出数据到 {filename}")
            file_size = os.path.getsize(filename) / 1024 / 1024
            print(f"导出文件大小：{file_size:.2f}MB")

        except Exception as e:
            print(f"导出数据失败: {str(e)}")

if __name__ == "__main__":
    # Script entry point: build the exporter and run the full pipeline.
    ConceptMemberExporter().run()