import json
import os
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Optional

import requests
from tqdm import tqdm

class Downloader:
    """Resolve SZSE announcement attachment ids to download metadata.

    Reads pending ids from ``result.json``, queries the SZSE
    bulletin-detail API for each (optionally through an HTTP proxy, with
    retries), and appends the results to ``download_urls.json`` /
    ``download_attachid.json`` in batches so progress survives interruption
    and already-processed ids are skipped on the next run.
    """

    def __init__(self,
                 use_proxy: bool = False,
                 num_threads: int = 10,
                 proxy_config: Dict = None,
                 save_batch_size: int = 100,
                 max_retries: int = 3,
                 retry_delay: int = 5):
        """
        Args:
            use_proxy: route requests through ``proxy_config`` when True.
            num_threads: worker threads used per batch (also the batch size).
            proxy_config: requests-style proxy mapping; defaults to a local
                proxy on 127.0.0.1:7890.
            save_batch_size: flush results to disk once at least this many
                new documents have accumulated.
            max_retries: attempts per attachment id before giving up.
            retry_delay: seconds slept between attempts.
        """
        self.api_base = "https://www.szse.cn/api/disc/announcement/bulletin_detail/"
        self.use_proxy = use_proxy
        self.num_threads = num_threads
        self.proxy_config = proxy_config or {
            'http': 'http://127.0.0.1:7890',
            'https': 'http://127.0.0.1:7890'
        }
        self.save_batch_size = save_batch_size
        self.max_retries = max_retries
        self.retry_delay = retry_delay

    def load_json_file(self, filename: str) -> List:
        """Return the JSON payload of *filename*, or [] if it does not exist."""
        if os.path.exists(filename):
            with open(filename, 'r', encoding='utf-8') as f:
                return json.load(f)
        return []

    def save_json_file(self, data: List, filename: str):
        """Write *data* to *filename* as pretty-printed UTF-8 JSON."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def get_document_info(self, attach_id: str) -> Optional[Dict]:
        """Fetch title and attachment path for a single attachment id.

        Retries up to ``max_retries`` times with ``retry_delay`` between
        attempts. On a proxy failure the proxy is disabled for ALL
        subsequent requests (``self.use_proxy`` is shared across worker
        threads — this fallback is deliberate but process-wide).

        Returns:
            A dict with 'attach_id', 'title', 'attach_path', or None when
            every attempt failed.
        """
        # Loop-invariant: build the headers once, not on every retry.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Referer': 'https://www.szse.cn/'
        }
        for attempt in range(self.max_retries):
            try:
                response = requests.get(
                    f"{self.api_base}{attach_id}",
                    proxies=self.proxy_config if self.use_proxy else None,
                    headers=headers,
                    # NOTE(review): TLS verification is disabled — confirm
                    # this is intentional for www.szse.cn.
                    verify=False,
                    timeout=10
                )
                response.raise_for_status()
                data = response.json()
                return {
                    'attach_id': attach_id,
                    'title': data.get('title'),
                    'attach_path': data.get('attachPath')
                }
            except requests.exceptions.ProxyError as e:
                # Fall back to a direct connection for every later request,
                # then treat this attempt like any other failure (the
                # original code silently dropped ProxyError on the last
                # attempt / when the proxy was already disabled).
                if self.use_proxy:
                    print(f"Proxy error for {attach_id}, retrying without proxy...")
                    self.use_proxy = False
                if attempt < self.max_retries - 1:
                    time.sleep(self.retry_delay)
                else:
                    print(f"Failed to fetch {attach_id} after {self.max_retries} attempts: {e}")
            except Exception as e:
                # Covers RequestException (HTTP errors, timeouts) as well as
                # JSON decoding errors; the old (RequestException, Exception)
                # tuple was redundant.
                if attempt < self.max_retries - 1:
                    print(f"Error fetching {attach_id} (attempt {attempt + 1}/{self.max_retries}): {str(e)}")
                    time.sleep(self.retry_delay)
                else:
                    print(f"Failed to fetch {attach_id} after {self.max_retries} attempts: {str(e)}")
        return None

    def process_batch(self, attach_ids: List[str]) -> List[Dict]:
        """Resolve a batch of ids concurrently, dropping failed lookups."""
        with ThreadPoolExecutor(max_workers=self.num_threads) as executor:
            results = list(tqdm(
                executor.map(self.get_document_info, attach_ids),
                total=len(attach_ids),
                desc="Fetching document info"
            ))
        return [r for r in results if r is not None]

    def _flush(self, processed_urls: List, processed_ids: List,
               new_results: List[Dict]):
        """Append *new_results* to both accumulators and persist them."""
        processed_urls.extend(new_results)
        processed_ids.extend([r['attach_id'] for r in new_results])
        self.save_json_file(processed_urls, 'download_urls.json')
        self.save_json_file(processed_ids, 'download_attachid.json')

    def run(self):
        """Process every id from result.json not handled on a previous run."""
        # Full id list plus previously persisted progress.
        all_ids = self.load_json_file('result.json')
        processed_urls = self.load_json_file('download_urls.json')
        processed_ids = self.load_json_file('download_attachid.json')

        # Set for O(1) membership; avoid shadowing the builtin `id`.
        processed_id_set = set(processed_ids)
        pending_ids = [aid for aid in all_ids if aid not in processed_id_set]

        if not pending_ids:
            print("No new documents to process")
            return

        batch_count = 0
        new_results: List[Dict] = []

        # One thread-pool batch per num_threads ids; flush to disk whenever
        # save_batch_size successes have accumulated.
        for i in range(0, len(pending_ids), self.num_threads):
            batch_results = self.process_batch(pending_ids[i:i + self.num_threads])
            new_results.extend(batch_results)
            batch_count += len(batch_results)

            if batch_count >= self.save_batch_size:
                self._flush(processed_urls, processed_ids, new_results)
                print(f"Saved batch of {batch_count} documents")
                batch_count = 0
                new_results = []

        # Persist whatever is left over after the loop.
        if new_results:
            self._flush(processed_urls, processed_ids, new_results)
            print(f"Saved final batch of {batch_count} documents")

if __name__ == "__main__":
    # NOTE(review): proxy credentials are hard-coded in source; consider
    # loading them from environment variables or a config file instead.
    PROXY = '101.200.231.167'  # proxy host under test
    proxies = {
        scheme: f'http://Xniao:654321@{PROXY}:80'
        for scheme in ('http', 'https')
    }

    # Example usage: fetch through the proxy with 10 worker threads,
    # persisting results every 100 documents.
    Downloader(
        use_proxy=True,
        num_threads=10,
        proxy_config=proxies,
        save_batch_size=100
    ).run()