import os
import threading
import time
from queue import Empty, Queue
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup


class MultiThreadImageScraper:
    """Multi-threaded web image scraper.

    Starting from ``target_url``, worker threads pull page URLs off a shared
    queue, download every image referenced by ``<img>`` tags (``src`` or
    ``data-src``), and enqueue same-site links, until either ``page_cap``
    pages have been seen or ``image_cap`` images have been saved.

    All shared mutable state (``saved_img_count``, ``crawled_pages``) is
    guarded by ``thread_lock``.
    """

    def __init__(self, target_url: str, page_cap: int = 24,
                 image_cap: int = 124, thread_num: int = 5) -> None:
        self.target_url = target_url            # seed URL; also used as the same-site filter
        self.page_cap = page_cap                # max number of pages to crawl
        self.image_cap = image_cap              # max number of images to save
        self.thread_num = thread_num            # number of worker threads
        self.saved_img_count = 0                # images saved so far (lock-protected)
        self.crawled_pages = set()              # URLs already queued/visited (lock-protected)
        self.page_task_queue = Queue()          # pending page URLs shared by workers
        self.thread_lock = threading.Lock()     # guards the counters/sets above
        self.http_client = requests.Session()   # reuse TCP connections across requests

        self.http_client.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

        self.img_save_dir = 'images_multi'
        # exist_ok avoids the check-then-create race of isdir()+makedirs()
        os.makedirs(self.img_save_dir, exist_ok=True)

        # Seed the crawl with the start page.
        self.page_task_queue.put(target_url)
        self.crawled_pages.add(target_url)

    def check_url_validity(self, url: str) -> bool:
        """Return True if *url* is absolute (has both a scheme and a host)."""
        parsed_url = urlparse(url)
        return bool(parsed_url.netloc) and bool(parsed_url.scheme)

    def fetch_and_save_image(self, img_src: str, source_page_url: str) -> bool:
        """Download one image and save it under ``img_save_dir``.

        Relative URLs are resolved against *source_page_url*.  Returns True
        on success, False when the image was skipped (cap reached, bad URL,
        unsupported extension) or the request failed.
        """
        # Fast-path bail-out; the cap is re-checked under the lock again
        # before counting, since other threads may save images meanwhile.
        with self.thread_lock:
            if self.saved_img_count >= self.image_cap:
                return False

        try:
            # Resolve relative image paths against the page they came from.
            if not img_src.startswith(('http://', 'https://')):
                img_src = urljoin(source_page_url, img_src)

            if not self.check_url_validity(img_src):
                return False

            # Only accept well-known image extensions.
            supported_formats = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp')
            if not img_src.lower().endswith(supported_formats):
                return False

            current_thread = threading.current_thread().name
            print(f"{current_thread} 正在获取图片: {img_src}")

            # Fetch the image body; 10-second timeout, HTTP errors raise.
            img_resp = self.http_client.get(img_src, timeout=10)
            img_resp.raise_for_status()

            # Derive a file name from the URL path; fall back to a counter.
            img_filename = os.path.basename(urlparse(img_src).path)
            if not img_filename:
                # Read the shared counter under the lock.
                with self.thread_lock:
                    img_filename = f"pic_{self.saved_img_count + 1}.jpg"

            # Exclusive-create ('xb') makes name collision handling atomic:
            # if another thread created the file first, FileExistsError fires
            # and we retry with a numeric suffix.  An exists()-then-open
            # check would race between threads.
            img_save_path = os.path.join(self.img_save_dir, img_filename)
            dup_counter = 1
            while True:
                try:
                    with open(img_save_path, 'xb') as img_file:
                        img_file.write(img_resp.content)
                    break
                except FileExistsError:
                    name_core, file_ext = os.path.splitext(img_filename)
                    img_save_path = os.path.join(
                        self.img_save_dir, f"{name_core}_{dup_counter}{file_ext}")
                    dup_counter += 1

            # Count the image atomically, re-checking the cap: several
            # downloads may have been in flight when the first check passed.
            with self.thread_lock:
                if self.saved_img_count >= self.image_cap:
                    # Cap was reached while we were downloading; discard the
                    # file so the on-disk count matches the configured cap.
                    os.remove(img_save_path)
                    return False
                self.saved_img_count += 1
                current_total = self.saved_img_count

            print(f"{current_thread} 保存成功: {img_filename} (累计: {current_total}/{self.image_cap})")
            return True

        except Exception as err:
            # Best-effort download: log and move on to the next image.
            print(f"{threading.current_thread().name} 图片获取失败 {img_src}: {err}")
            return False

    def parse_page_content(self, page_url: str) -> None:
        """Fetch one page, download its images, and enqueue in-site links."""
        current_thread = threading.current_thread().name
        print(f"{current_thread} 正在解析页面: {page_url}")

        try:
            # Fetch the page; force UTF-8 before reading .text.
            page_resp = self.http_client.get(page_url, timeout=10)
            page_resp.raise_for_status()
            page_resp.encoding = 'utf-8'

            html_soup = BeautifulSoup(page_resp.text, 'html.parser')

            # Download every image on the page, stopping at the cap.
            all_img_tags = html_soup.find_all('img')
            for img_tag in all_img_tags:
                with self.thread_lock:
                    if self.saved_img_count >= self.image_cap:
                        return

                # Support both eager (src) and lazy-loaded (data-src) images.
                img_url = img_tag.get('src')
                if not img_url:
                    img_url = img_tag.get('data-src')

                if img_url:
                    self.fetch_and_save_image(img_url, page_url)

            # Stop expanding the frontier once enough pages were seen.
            with self.thread_lock:
                if len(self.crawled_pages) >= self.page_cap:
                    return

            # Enqueue new same-site links; at most 8 links per page.
            all_links = html_soup.find_all('a', href=True)
            for link in all_links[:8]:
                with self.thread_lock:
                    # Double limit check: image count and page count.
                    if self.saved_img_count >= self.image_cap or len(self.crawled_pages) >= self.page_cap:
                        return

                link_url = link['href']
                # Resolve relative links against the current page.
                if not link_url.startswith('http'):
                    link_url = urljoin(page_url, link_url)

                # Adding to crawled_pages at enqueue time (under the lock)
                # prevents two threads from queueing the same URL twice.
                with self.thread_lock:
                    if (self.target_url in link_url and
                        link_url not in self.crawled_pages and
                        len(self.crawled_pages) < self.page_cap):
                        self.crawled_pages.add(link_url)
                        self.page_task_queue.put(link_url)

        except Exception as err:
            # One bad page must not kill the worker thread.
            print(f"{current_thread} 页面解析失败 {page_url}: {err}")

    def thread_worker(self) -> None:
        """Worker loop: pull page URLs off the queue until a limit is hit."""
        while True:
            # Exit when the image cap is hit, or when the queue has drained
            # and the page cap has been reached.
            with self.thread_lock:
                exit_condition = (self.saved_img_count >= self.image_cap) or \
                                (self.page_task_queue.empty() and len(self.crawled_pages) >= self.page_cap)
                if exit_condition:
                    break

            try:
                task_url = self.page_task_queue.get(timeout=5)
            except Empty:
                # No work arrived within the timeout — the crawl has drained.
                break

            try:
                self.parse_page_content(task_url)
            finally:
                # Always balance get() with task_done(), even if parsing
                # raises, so page_task_queue.join() can never hang.
                self.page_task_queue.task_done()

    def start_scraping(self) -> None:
        """Spawn the worker threads, wait for the crawl, print a summary."""
        print("启动多线程图片爬取...")
        print(f"目标站点: {self.target_url}")
        print(f"最大爬取页数: {self.page_cap}")
        print(f"最大保存图片数: {self.image_cap}")
        print(f"线程数量: {self.thread_num}")
        print("-" * 50)

        start_time = time.time()

        # Daemon workers: they must not keep the process alive on their own.
        worker_threads = []
        for i in range(self.thread_num):
            thread = threading.Thread(target=self.thread_worker, name=f"Worker-{i + 1}")
            thread.daemon = True
            thread.start()
            worker_threads.append(thread)

        # Block until every queued page has been processed (task_done).
        self.page_task_queue.join()

        # Give workers a moment to observe the exit condition and return.
        for thread in worker_threads:
            thread.join(timeout=1)

        end_time = time.time()
        print("-" * 50)
        print("爬取任务完成!")
        print(f"总耗时: {end_time - start_time:.2f}秒")
        print(f"已爬页面数: {len(self.crawled_pages)}")
        print(f"成功保存图片数: {self.saved_img_count}")


if __name__ == "__main__":
    # Entry point: configure the scraper against the demo site and run it.
    scraper = MultiThreadImageScraper(
        "http://www.weather.com.cn",
        page_cap=24,
        image_cap=124,
        thread_num=5,
    )
    scraper.start_scraping()