import requests
import json
import time
import logging
from typing import List, Dict
from urllib.parse import urlencode

# Configure root-logger output: timestamped, level-tagged messages at INFO.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)

class DLTCrawler:
    """Crawler for "Da Le Tou" (super lotto) draw results on zhcw.com.

    Pulls paginated JSONP draw data from the zhcw.com client API and
    normalizes each draw into a plain dict holding the draw date, the
    issue number, and the front/back winning numbers.
    """

    def __init__(self):
        # JSONP endpoint serving the draw history.
        self.base_url = 'https://jc.zhcw.com/port/client_json.php'
        # Browser-like headers; presumably the API requires a zhcw.com
        # Referer and a realistic User-Agent — TODO confirm against the API.
        self.headers = {
            'Accept': '*/*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'DNT': '1',
            'Pragma': 'no-cache',
            'Referer': 'https://www.zhcw.com/',
            'Sec-Fetch-Dest': 'script',
            'Sec-Fetch-Mode': 'no-cors',
            'Sec-Fetch-Site': 'same-site',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"'
        }

    @staticmethod
    def _strip_jsonp(text: str) -> Dict:
        """Unwrap a JSONP response ("callback({...})") and parse the JSON.

        Raises:
            ValueError: if no parentheses are present in *text*.
            json.JSONDecodeError: if the wrapped payload is not valid JSON.
        """
        return json.loads(text[text.index('(') + 1:text.rindex(')')])

    @staticmethod
    def _parse_records(items: List[Dict]) -> List[Dict]:
        """Normalize raw API draw items into record dicts."""
        return [
            {
                'draw_date': item['openTime'],
                'draw_num': item['issue'],
                # Front zone: 5 numbers, space-separated in the API payload.
                'red_numbers': item['frontWinningNum'].split(),
                # Back zone: 2 numbers, space-separated.
                'blue_numbers': item['backWinningNum'].split(),
            }
            for item in items
        ]

    def _build_params(self, page_num: int, page_size: int) -> Dict[str, str]:
        """Query parameters for one page, with fresh cache-busting timestamps."""
        now_ms = int(time.time() * 1000)
        return {
            'callback': f'jQuery{now_ms}_{now_ms}',
            'transactionType': '10001001',
            'lotteryId': '281',  # lotteryId of Da Le Tou on zhcw.com
            'issueCount': '1000',
            'startIssue': '',
            'endIssue': '',
            'startDate': '',
            'endDate': '',
            'type': '0',
            'pageNum': str(page_num),
            'pageSize': str(page_size),
            'tt': f'{time.time()}',
            '_': f'{now_ms}',
        }

    def fetch_lottery_data(self, retries: int = 3) -> List[Dict]:
        """Fetch all draw records from the API, page by page.

        Args:
            retries: attempts per page before giving up; must be >= 1.

        Returns:
            All records fetched so far — possibly a partial result if a
            page ultimately fails (matching the original best-effort
            contract of returning what was accumulated).

        Raises:
            ValueError: if ``retries`` < 1.  The original code silently
                spun forever in that case, because the retry loop body
                never executed and the outer ``while True`` never exited.
        """
        if retries < 1:
            raise ValueError('retries must be a positive integer')

        all_lottery_data: List[Dict] = []
        page_num = 1
        page_size = 30

        while True:
            for attempt in range(retries):
                try:
                    # Rebuild the URL on every attempt so the cache-busting
                    # timestamps stay fresh across retries (the original
                    # reused a stale URL for all attempts of a page).
                    url = f'{self.base_url}?{urlencode(self._build_params(page_num, page_size))}'
                    response = requests.get(url, headers=self.headers, timeout=10)
                    response.raise_for_status()
                    data = self._strip_jsonp(response.text)

                    if not data.get('data'):
                        logging.info('No more data found at page %s', page_num)
                        return all_lottery_data

                    page_data = self._parse_records(data['data'])
                    all_lottery_data.extend(page_data)
                    logging.info('Successfully fetched page %s with %s records',
                                 page_num, len(page_data))

                    # A short page means the API has no further pages.
                    if len(page_data) < page_size:
                        return all_lottery_data

                    page_num += 1
                    break  # success: move on to the next page

                except Exception as e:
                    if attempt == retries - 1:
                        logging.error('Failed to fetch data at page %s after %s attempts: %s',
                                      page_num, retries, e)
                        return all_lottery_data
                    time.sleep(2 ** attempt)  # exponential backoff: 1s, 2s, 4s, ...

    def crawl(self) -> List[Dict]:
        """Run the full crawl; return every record, or [] on failure."""
        try:
            lottery_data = self.fetch_lottery_data()
            logging.info('Successfully crawled %s lottery records', len(lottery_data))
            return lottery_data
        except Exception as e:
            logging.error('Crawling failed: %s', e)
            return []

if __name__ == '__main__':
    # Script entry point: crawl everything and dump each record to stdout.
    records = DLTCrawler().crawl()
    for rec in records:
        print(rec)