import re
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from typing import Dict, Optional

class SSQBackupCrawler:
    """双色球备用爬虫 - 从17500.cn获取数据

    Backup crawler for the Shuangseqiu (SSQ) lottery. Scrapes the mobile
    results page of 17500.cn and returns a flat dict that can be checked
    with is_valid_data().
    """

    def __init__(self):
        # Mobile page showing the most recent draw.
        self.source_url = "https://m.17500.cn/lottery-m/ssq.html"
        # Desktop Chrome UA so the site serves the expected markup.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest SSQ draw.

        Returns:
            Dict with keys 'draw_date' (YYYY-MM-DD), 'draw_number',
            'red_numbers' (comma-joined, zero-padded) and 'blue_number',
            or None when the page cannot be fetched or parsed.
        """
        try:
            response = requests.get(self.source_url, headers=self.headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # 期号 (draw number), e.g. "第2024066期".
            draw_num_tag = soup.find('b', class_='fblue mr10')
            draw_num = None
            if draw_num_tag:
                match = re.search(r'第(\d+)期', draw_num_tag.text)
                if match:
                    draw_num = match.group(1)

            # 日期 (draw date) in ISO form YYYY-MM-DD.
            date_tag = soup.find('i', class_='f12')
            date = None
            if date_tag:
                match = re.search(r'(\d{4}-\d{2}-\d{2})', date_tag.text)
                if match:
                    date = match.group(1)

            # 开奖号码 (winning numbers) container.
            ball_p = soup.find('p', class_='ball')
            if not ball_p:
                print('未找到开奖号码容器')
                return None

            # 6 red balls + 1 blue ball, zero-padded to two digits.
            red_balls = [b.text.strip().zfill(2) for b in ball_p.find_all('b', class_='rb')]
            blue_ball_tag = ball_p.find('b', class_='bb')
            blue_ball = blue_ball_tag.text.strip().zfill(2) if blue_ball_tag else None

            if not (draw_num and date and len(red_balls) == 6 and blue_ball):
                print('数据不完整')
                return None

            return {
                'draw_date': date,
                'draw_number': draw_num,
                'red_numbers': ','.join(red_balls),
                'blue_number': blue_ball
            }
        except Exception as e:
            # Best-effort backup source: report and signal failure to caller.
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Validate a dict produced by fetch_latest_draw.

        Checks the date format, a 7-digit draw number, exactly six red
        numbers and one blue number.
        """
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if not re.match(r'^\d{7}$', data['draw_number']):
            return False
        # BUG FIX: fetch_latest_draw stores 'red_numbers' as a comma-joined
        # string; the previous code compared its *character* length (17 for a
        # valid payload) against 6 and iterated characters, rejecting every
        # valid dict. Split first (a pre-split list is accepted too, matching
        # the FC3D/KL8/DLT validators in this file).
        red_numbers = data['red_numbers'].split(',') if isinstance(data['red_numbers'], str) else data['red_numbers']
        if not (len(red_numbers) == 6 and all(re.match(r'^\d{1,2}$', ball) for ball in red_numbers)):
            return False
        if not re.match(r'^\d{1,2}$', data['blue_number']):
            return False
        return True

class FC3DBackupCrawler:
    """福彩3D备用爬虫 - 从17500.cn获取数据 (backup crawler for FC3D)."""

    def __init__(self):
        # Mobile results page for the latest FC3D draw.
        self.source_url = "https://m.17500.cn/lottery-m/3d.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest FC3D draw; return the result dict or None on failure."""
        try:
            page = requests.get(self.source_url, headers=self.headers, timeout=10)
            page.raise_for_status()
            soup = BeautifulSoup(page.text, 'html.parser')

            # Draw number, e.g. "第2024066期".
            issue = None
            issue_node = soup.find('b', class_='fblue mr10')
            if issue_node:
                found = re.search(r'第(\d+)期', issue_node.text)
                issue = found.group(1) if found else None

            # Draw date in YYYY-MM-DD form.
            day = None
            day_node = soup.find('i', class_='f12')
            if day_node:
                found = re.search(r'(\d{4}-\d{2}-\d{2})', day_node.text)
                day = found.group(1) if found else None

            container = soup.find('p', class_='ball')
            if container is None:
                print('未找到开奖号码容器')
                return None

            # Three drawn digits, kept as-is (no zero padding for 3D).
            digits = [node.text.strip() for node in container.find_all('b', class_='rb')]

            if not issue or not day or len(digits) != 3:
                print('数据不完整')
                return None

            return {
                'draw_date': day,
                'draw_number': issue,
                'red_numbers': ','.join(digits),
                'blue_number': ''
            }
        except Exception as e:
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Check date format, 7-digit draw number and exactly three digits."""
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if re.match(r'^\d{7}$', data['draw_number']) is None:
            return False
        raw = data['red_numbers']
        numbers = raw.split(',') if isinstance(raw, str) else raw
        if len(numbers) != 3:
            return False
        return all(re.match(r'^\d{1,2}$', n) for n in numbers)

class KL8BackupCrawler:
    """快乐8备用爬虫 - 从17500.cn获取数据 (backup crawler for Kuaile 8)."""

    def __init__(self):
        # Mobile results page for the latest KL8 draw.
        self.source_url = "https://m.17500.cn/lottery-m/kl8.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest KL8 draw; return the result dict or None on failure."""
        try:
            resp = requests.get(self.source_url, headers=self.headers, timeout=10)
            resp.raise_for_status()
            soup = BeautifulSoup(resp.text, 'html.parser')

            # Draw number, e.g. "第2024066期".
            issue = None
            issue_node = soup.find('b', class_='fblue mr10')
            if issue_node:
                found = re.search(r'第(\d+)期', issue_node.text)
                issue = found.group(1) if found else None

            # Draw date in YYYY-MM-DD form.
            day = None
            day_node = soup.find('i', class_='f12')
            if day_node:
                found = re.search(r'(\d{4}-\d{2}-\d{2})', day_node.text)
                day = found.group(1) if found else None

            container = soup.find('p', class_='ball')
            if container is None:
                print('未找到开奖号码容器')
                return None

            # Twenty drawn numbers, zero-padded to two digits.
            numbers = [node.text.strip().zfill(2) for node in container.find_all('b', class_='rb')]

            if not issue or not day or len(numbers) != 20:
                print('数据不完整')
                return None

            return {
                'draw_date': day,
                'draw_number': issue,
                'red_numbers': ','.join(numbers),
                'blue_number': ''
            }
        except Exception as e:
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Check date format, 7-digit draw number and twenty 2-digit numbers."""
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if re.match(r'^\d{7}$', data['draw_number']) is None:
            return False
        raw = data['red_numbers']
        numbers = raw.split(',') if isinstance(raw, str) else raw
        if len(numbers) != 20:
            return False
        return all(re.match(r'^\d{2}$', n) for n in numbers)

class QLCBackupCrawler:
    """七乐彩备用爬虫 - 从17500.cn获取数据

    Backup crawler for the Qilecai (QLC) lottery. Scrapes the mobile
    results page of 17500.cn and returns a flat dict that can be checked
    with is_valid_data().
    """

    def __init__(self):
        # Mobile page showing the most recent draw.
        self.source_url = "https://m.17500.cn/lottery-m/7lc.html"
        # Desktop Chrome UA so the site serves the expected markup.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest QLC draw.

        Returns:
            Dict with keys 'draw_date' (YYYY-MM-DD), 'draw_number',
            'red_numbers' (comma-joined, zero-padded, 7 values) and
            'blue_number', or None when fetching/parsing fails.
        """
        try:
            response = requests.get(self.source_url, headers=self.headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # 期号 (draw number), e.g. "第2024066期".
            draw_num_tag = soup.find('b', class_='fblue mr10')
            draw_num = None
            if draw_num_tag:
                match = re.search(r'第(\d+)期', draw_num_tag.text)
                if match:
                    draw_num = match.group(1)

            # 日期 (draw date) in ISO form YYYY-MM-DD.
            date_tag = soup.find('i', class_='f12')
            date = None
            if date_tag:
                match = re.search(r'(\d{4}-\d{2}-\d{2})', date_tag.text)
                if match:
                    date = match.group(1)

            # 开奖号码 (winning numbers) container.
            ball_p = soup.find('p', class_='ball')
            if not ball_p:
                print('未找到开奖号码容器')
                return None

            # 7 red balls + 1 special ball, zero-padded to two digits.
            red_balls = [b.text.strip().zfill(2) for b in ball_p.find_all('b', class_='rb')]
            blue_ball_tag = ball_p.find('b', class_='bb')
            blue_ball = blue_ball_tag.text.strip().zfill(2) if blue_ball_tag else None

            if not (draw_num and date and len(red_balls) == 7 and blue_ball):
                print('数据不完整')
                return None

            return {
                'draw_date': date,
                'draw_number': draw_num,
                'red_numbers': ','.join(red_balls),
                'blue_number': blue_ball
            }
        except Exception as e:
            # Best-effort backup source: report and signal failure to caller.
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Validate a dict produced by fetch_latest_draw.

        Checks the date format, a 7-digit draw number, exactly seven red
        numbers and one special (blue) number.
        """
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if not re.match(r'^\d{7}$', data['draw_number']):
            return False
        # BUG FIX 1: 'red_numbers' is a comma-joined string; the previous code
        # compared its character length against the ball count and iterated
        # characters, rejecting every valid dict. Split first (a pre-split
        # list is accepted too, matching the FC3D/KL8/DLT validators).
        # BUG FIX 2: the count was 6, but fetch_latest_draw requires exactly
        # 7 red balls for this game, so the validator contradicted the fetcher.
        red_numbers = data['red_numbers'].split(',') if isinstance(data['red_numbers'], str) else data['red_numbers']
        if not (len(red_numbers) == 7 and all(re.match(r'^\d{1,2}$', ball) for ball in red_numbers)):
            return False
        if not re.match(r'^\d{1,2}$', data['blue_number']):
            return False
        return True

class PL3BackupCrawler:
    """排列3备用爬虫 - 从17500.cn获取数据 (backup crawler for Pailie 3)."""

    def __init__(self):
        # Mobile results page for the latest PL3 draw.
        self.source_url = "https://m.17500.cn/lottery-m/pl3.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest PL3 draw; return the result dict or None on failure."""
        try:
            page = requests.get(self.source_url, headers=self.headers, timeout=10)
            page.raise_for_status()
            soup = BeautifulSoup(page.text, 'html.parser')

            # Draw number, e.g. "第24150期".
            issue = None
            issue_node = soup.find('b', class_='fblue mr10')
            if issue_node:
                found = re.search(r'第(\d+)期', issue_node.text)
                issue = found.group(1) if found else None

            # Draw date in YYYY-MM-DD form.
            day = None
            day_node = soup.find('i', class_='f12')
            if day_node:
                found = re.search(r'(\d{4}-\d{2}-\d{2})', day_node.text)
                day = found.group(1) if found else None

            container = soup.find('p', class_='ball')
            if container is None:
                print('未找到开奖号码容器')
                return None

            # Three drawn digits, kept as-is (no zero padding).
            digits = [node.text.strip() for node in container.find_all('b', class_='rb')]

            if not issue or not day or len(digits) != 3:
                print('数据不完整')
                return None

            return {
                'draw_date': day,
                'draw_number': issue,
                'red_numbers': ','.join(digits),
                'blue_number': ''
            }
        except Exception as e:
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Check date format, 7-digit draw number and exactly three digits.

        NOTE(review): sports-lottery issue numbers are often 5 digits
        (e.g. 24150); the `\\d{7}` pattern here matches the welfare-lottery
        format — confirm against actual page data.
        """
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if re.match(r'^\d{7}$', data['draw_number']) is None:
            return False
        raw = data['red_numbers']
        numbers = raw.split(',') if isinstance(raw, str) else raw
        if len(numbers) != 3:
            return False
        return all(re.match(r'^\d{1,2}$', n) for n in numbers)

class DLTBackupCrawler:
    """大乐透备用爬虫 - 从17500.cn获取数据"""
    def __init__(self):
        self.source_url = "https://m.17500.cn/lottery-m/dlt.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        try:
            response = requests.get(self.source_url, headers=self.headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # 期号
            draw_num_tag = soup.find('b', class_='fblue mr10')
            draw_num = None
            if draw_num_tag:
                match = re.search(r'第(\d+)期', draw_num_tag.text)
                if match:
                    draw_num = match.group(1)

            # 日期
            date_tag = soup.find('i', class_='f12')
            date = None
            if date_tag:
                match = re.search(r'(\d{4}-\d{2}-\d{2})', date_tag.text)
                if match:
                    date = match.group(1)

            # 开奖号码
            ball_p = soup.find('p', class_='ball')
            if not ball_p:
                print('未找到开奖号码容器')
                return None

            red_balls = [b.text.strip().zfill(2) for b in ball_p.find_all('b', class_='rb')]
            blue_balls = [b.text.strip().zfill(2) for b in ball_p.find_all('b', class_='bb')]

            if not (draw_num and date and len(red_balls) == 5 and len(blue_balls) == 2):
                print('数据不完整')
                return None

            return {
                'draw_date': date,
                'draw_number': draw_num,
                'red_numbers': ','.join(red_balls),
                'blue_number': ','.join(blue_balls)
            }
        except Exception as e:
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if not re.match(r'^\d{7}$', data['draw_number']):
            return False
        red_numbers = data['red_numbers'].split(',') if isinstance(data['red_numbers'], str) else data['red_numbers']
        blue_numbers = data['blue_number'].split(',') if isinstance(data['blue_number'], str) else data['blue_number']
        if not (len(red_numbers) == 5 and all(re.match(r'^\d{2}$', ball) for ball in red_numbers)):
            return False
        if not (len(blue_numbers) == 2 and all(re.match(r'^\d{2}$', ball) for ball in blue_numbers)):
            return False
        return True

class PL5BackupCrawler:
    """排列5备用爬虫 - 从17500.cn获取数据 (backup crawler for Pailie 5)."""

    def __init__(self):
        # Mobile results page for the latest PL5 draw.
        self.source_url = "https://m.17500.cn/lottery-m/pl5.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest PL5 draw; return the result dict or None on failure."""
        try:
            page = requests.get(self.source_url, headers=self.headers, timeout=10)
            page.raise_for_status()
            soup = BeautifulSoup(page.text, 'html.parser')

            # Draw number, e.g. "第24150期".
            issue = None
            issue_node = soup.find('b', class_='fblue mr10')
            if issue_node:
                found = re.search(r'第(\d+)期', issue_node.text)
                issue = found.group(1) if found else None

            # Draw date in YYYY-MM-DD form.
            day = None
            day_node = soup.find('i', class_='f12')
            if day_node:
                found = re.search(r'(\d{4}-\d{2}-\d{2})', day_node.text)
                day = found.group(1) if found else None

            container = soup.find('p', class_='ball')
            if container is None:
                print('未找到开奖号码容器')
                return None

            # Five drawn digits, kept as-is (no zero padding).
            digits = [node.text.strip() for node in container.find_all('b', class_='rb')]

            if not issue or not day or len(digits) != 5:
                print('数据不完整')
                return None

            return {
                'draw_date': day,
                'draw_number': issue,
                'red_numbers': ','.join(digits),
                'blue_number': ''
            }
        except Exception as e:
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Check date format, 7-digit draw number and exactly five digits.

        NOTE(review): sports-lottery issue numbers are often 5 digits
        (e.g. 24150) — confirm the `\\d{7}` pattern against actual page data.
        """
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if re.match(r'^\d{7}$', data['draw_number']) is None:
            return False
        raw = data['red_numbers']
        numbers = raw.split(',') if isinstance(raw, str) else raw
        if len(numbers) != 5:
            return False
        return all(re.match(r'^\d{1,2}$', n) for n in numbers)

class QXCBackupCrawler:
    """7星彩备用爬虫 - 从17500.cn获取数据

    Backup crawler for the Qixingcai (QXC) lottery. Scrapes the mobile
    results page of 17500.cn and returns a flat dict that can be checked
    with is_valid_data().
    """

    def __init__(self):
        # Mobile page showing the most recent draw.
        self.source_url = "https://m.17500.cn/lottery-m/7xc.html"
        # Desktop Chrome UA so the site serves the expected markup.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

    def fetch_latest_draw(self) -> Optional[Dict]:
        """Scrape the latest QXC draw.

        Returns:
            Dict with keys 'draw_date' (YYYY-MM-DD), 'draw_number',
            'red_numbers' (comma-joined, 6 front digits, no padding) and
            'blue_number' (last digit), or None when fetching/parsing fails.
        """
        try:
            response = requests.get(self.source_url, headers=self.headers, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # 期号 (draw number), e.g. "第24066期".
            draw_num_tag = soup.find('b', class_='fblue mr10')
            draw_num = None
            if draw_num_tag:
                match = re.search(r'第(\d+)期', draw_num_tag.text)
                if match:
                    draw_num = match.group(1)

            # 日期 (draw date) in ISO form YYYY-MM-DD.
            date_tag = soup.find('i', class_='f12')
            date = None
            if date_tag:
                match = re.search(r'(\d{4}-\d{2}-\d{2})', date_tag.text)
                if match:
                    date = match.group(1)

            # 开奖号码 (winning numbers) container.
            ball_p = soup.find('p', class_='ball')
            if not ball_p:
                print('未找到开奖号码容器')
                return None

            # 6 front digits + 1 last digit (no zero padding for QXC).
            red_balls = [b.text.strip() for b in ball_p.find_all('b', class_='rb')]
            blue_ball_tag = ball_p.find('b', class_='bb')
            blue_ball = blue_ball_tag.text.strip() if blue_ball_tag else None

            if not (draw_num and date and len(red_balls) == 6 and blue_ball):
                print('数据不完整')
                return None

            return {
                'draw_date': date,
                'draw_number': draw_num,
                'red_numbers': ','.join(red_balls),
                'blue_number': blue_ball
            }
        except Exception as e:
            # Best-effort backup source: report and signal failure to caller.
            print(f'爬取数据失败: {str(e)}')
            return None

    def is_valid_data(self, data: Dict) -> bool:
        """Validate a dict produced by fetch_latest_draw.

        Checks the date format, a 7-digit draw number, exactly six front
        digits and one last digit.

        NOTE(review): sports-lottery issue numbers are often 5 digits
        (e.g. 24066) — confirm the `\\d{7}` pattern against actual page data.
        """
        if not data:
            return False
        try:
            datetime.strptime(data['draw_date'], '%Y-%m-%d')
        except ValueError:
            return False
        if not re.match(r'^\d{7}$', data['draw_number']):
            return False
        # BUG FIX: fetch_latest_draw stores 'red_numbers' as a comma-joined
        # string; the previous code compared its *character* length (11 for a
        # valid payload) against 6 and iterated characters, rejecting every
        # valid dict. Split first (a pre-split list is accepted too, matching
        # the FC3D/KL8/DLT validators in this file).
        red_numbers = data['red_numbers'].split(',') if isinstance(data['red_numbers'], str) else data['red_numbers']
        if not (len(red_numbers) == 6 and all(re.match(r'^\d{1,2}$', ball) for ball in red_numbers)):
            return False
        if not re.match(r'^\d{1,2}$', data['blue_number']):
            return False
        return True