import requests
from bs4 import BeautifulSoup
import os
import time
from urllib.parse import urljoin, urlparse


class ImageScraperSingleThread:
    """Single-threaded image scraper.

    Starting from ``target_url``, recursively crawls same-site pages
    (depth-first, up to ``page_limit`` pages) and downloads image files
    (up to ``image_limit``) into a local folder.
    """

    def __init__(self, target_url, page_limit=24, image_limit=124):
        """Set up crawl limits, the HTTP session, and the output folder.

        Args:
            target_url: Site root to crawl; also used as a substring filter
                so the crawler stays on the target site.
            page_limit: Maximum number of pages to visit.
            image_limit: Maximum number of images to save.
        """
        self.target_url = target_url
        self.page_limit = page_limit
        self.image_limit = image_limit
        self.saved_count = 0          # images saved so far
        self.crawled_pages = set()    # URLs already visited (dedup)
        # Session reuses TCP connections and carries a browser-like UA,
        # since some sites reject the default requests user agent.
        self.http_session = requests.Session()
        self.http_session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

        self.save_folder = 'images_single'
        # exist_ok=True avoids the check-then-create race of isdir+makedirs.
        os.makedirs(self.save_folder, exist_ok=True)

    def validate_url(self, url):
        """Return True if *url* has both a scheme and a network location."""
        parsed_url = urlparse(url)
        return bool(parsed_url.netloc) and bool(parsed_url.scheme)

    def save_image(self, image_url, source_page):
        """Download one image and save it to the output folder.

        Relative URLs are resolved against *source_page*. Non-image URLs
        (by extension) are skipped. Returns True on success, False otherwise;
        never raises — failures are reported and swallowed (best effort).
        """
        try:
            # Resolve relative image URLs against the page they came from.
            if not image_url.startswith(('http://', 'https://')):
                image_url = urljoin(source_page, image_url)

            if not self.validate_url(image_url):
                return False

            # Cheap extension-based filter; avoids fetching non-image assets.
            allowed_formats = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp')
            if not image_url.lower().endswith(allowed_formats):
                return False

            print(f"正在获取图片: {image_url}")

            img_response = self.http_session.get(image_url, timeout=10)
            img_response.raise_for_status()

            file_name = os.path.basename(urlparse(image_url).path)
            if not file_name:
                # URL path had no basename — synthesize a sequential name.
                file_name = f"pic_{self.saved_count + 1}.jpg"

            # Avoid clobbering: append _1, _2, ... until the name is free.
            file_path = os.path.join(self.save_folder, file_name)
            duplicate_counter = 1
            while os.path.exists(file_path):
                name_without_ext, ext = os.path.splitext(file_name)
                file_path = os.path.join(self.save_folder, f"{name_without_ext}_{duplicate_counter}{ext}")
                duplicate_counter += 1

            with open(file_path, 'wb') as img_file:
                img_file.write(img_response.content)

            self.saved_count += 1
            # Report the name actually written (may carry a _N suffix),
            # not the original file_name.
            saved_name = os.path.basename(file_path)
            print(f"保存成功: {saved_name} (累计: {self.saved_count}/{self.image_limit})")
            return True

        except Exception as err:
            # Best-effort download: log and keep crawling.
            print(f"图片获取失败 {image_url}: {err}")
            return False

    def scrape_page(self, page_url):
        """Crawl one page: download its images, then follow same-site links.

        Recurses depth-first; stops when page_limit or image_limit is hit.
        """
        if page_url in self.crawled_pages or len(self.crawled_pages) >= self.page_limit:
            return

        print(f"正在解析页面: {page_url}")
        self.crawled_pages.add(page_url)

        try:
            page_response = self.http_session.get(page_url, timeout=10)
            page_response.raise_for_status()
            page_response.encoding = 'utf-8'

            html_parser = BeautifulSoup(page_response.text, 'html.parser')

            # Download every <img> on the page, honoring the image limit.
            all_img_tags = html_parser.find_all('img')
            for img_tag in all_img_tags:
                if self.saved_count >= self.image_limit:
                    return

                # Support lazy-loaded images that keep the URL in data-src.
                img_src = img_tag.get('src') or img_tag.get('data-src')
                if img_src:
                    self.save_image(img_src, page_url)

            # Follow up to 10 outgoing links while under the page limit.
            if len(self.crawled_pages) < self.page_limit:
                all_links = html_parser.find_all('a', href=True)
                for link in all_links[:10]:
                    if self.saved_count >= self.image_limit:
                        return

                    # urljoin leaves absolute URLs unchanged, so apply it
                    # unconditionally (a startswith('http') test would also
                    # match unrelated schemes).
                    next_page_url = urljoin(page_url, link['href'])

                    # Skip non-fetchable schemes (mailto:, javascript:, ...).
                    if urlparse(next_page_url).scheme not in ('http', 'https'):
                        continue

                    # Stay on the target site; skip already-visited pages.
                    if self.target_url in next_page_url and next_page_url not in self.crawled_pages:
                        self.scrape_page(next_page_url)

        except Exception as err:
            # Best-effort crawl: a broken page must not abort the run.
            print(f"页面解析失败 {page_url}: {err}")

    def start_scraping(self):
        """Run the crawl from target_url and print a summary with timing."""
        print("启动单线程图片爬取...")
        print(f"目标站点: {self.target_url}")
        print(f"最大爬取页数: {self.page_limit}")
        print(f"最大保存图片数: {self.image_limit}")
        print("-" * 50)

        start_timestamp = time.time()
        self.scrape_page(self.target_url)

        end_timestamp = time.time()
        print("-" * 50)
        print("爬取任务完成!")
        print(f"总耗时: {end_timestamp - start_timestamp:.2f}秒")
        print(f"已爬页面数: {len(self.crawled_pages)}")
        print(f"成功保存图片数: {self.saved_count}")


def _run():
    """Entry point: crawl the China Weather site with the default limits."""
    scraper = ImageScraperSingleThread(
        target_url="http://www.weather.com.cn",
        page_limit=24,
        image_limit=124,
    )
    scraper.start_scraping()


if __name__ == "__main__":
    _run()