"""
百度图片爬虫工具 - 加密版
作者：zyb
版权所有 © 2025.2.27
"""

import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import os
import scrapy
from scrapy import cmdline
import json
import re
from urllib.parse import quote
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import time
from datetime import datetime, timedelta
from functools import wraps
import hashlib
import base64
import sys
import uuid
import socket
import aiohttp
import asyncio
import aiofiles

# 添加一个计数器类来跟踪24小时内的爬取数量
class DailyCounter:
    """Tracks how many images have been crawled in the current 24-hour window.

    State (count + window start) is persisted to ``crawler_history.json`` in
    the current working directory so the quota survives restarts.
    """

    def __init__(self):
        self.count = 0
        self.last_reset = datetime.now()
        self.daily_limit = 1000  # maximum images per 24-hour window

        # Try to restore previous usage from disk. On any expected failure
        # (missing file, unreadable file, corrupt JSON, wrong schema, bad
        # timestamp) start fresh and persist the clean state. The original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        try:
            with open('crawler_history.json', 'r', encoding='utf-8') as f:
                data = json.load(f)
                self.count = data['count']
                self.last_reset = datetime.fromisoformat(data['last_reset'])
        except (OSError, ValueError, KeyError, TypeError):
            self.save_history()

    def increment(self, amount=1):
        """Add *amount* to the counter.

        Returns True when the amount fits within today's quota (counter is
        updated and persisted), False when it would exceed the daily limit.
        """
        # Start a new window if at least a full day has elapsed.
        now = datetime.now()
        if (now - self.last_reset).days >= 1:
            self.count = 0
            self.last_reset = now

        # Reject rather than partially apply an over-quota request.
        if self.count + amount > self.daily_limit:
            return False

        self.count += amount
        self.save_history()
        return True

    def save_history(self):
        """Persist the current count and window start to disk as JSON."""
        data = {
            'count': self.count,
            'last_reset': self.last_reset.isoformat()
        }
        with open('crawler_history.json', 'w', encoding='utf-8') as f:
            json.dump(data, f)

    def get_remaining(self):
        """Return how many images may still be crawled today (never negative)."""
        return max(0, self.daily_limit - self.count)

# Create the counter instance.
# NOTE(review): this name is rebound further down to an IPProtection
# instance, so this DailyCounter object is effectively unused — confirm.
daily_counter = DailyCounter()

class IPProtection:
    """Binds the daily crawl quota to the machine's first-seen IP address.

    On first run, an MD5 hash of the local IP is recorded under
    ``_internal/ip_protection.json`` next to the executable (or the current
    directory for plain-script runs). Later runs are only valid when the
    hashed IP still matches; usage counts are persisted alongside.
    """

    def __init__(self):
        if getattr(sys, 'frozen', False):
            # Running as a bundled executable: keep data beside the exe.
            base_path = os.path.dirname(sys.executable)
        else:
            # Running as a plain Python script.
            base_path = os.path.abspath(".")

        self.data_file = os.path.join(base_path, '_internal', 'ip_protection.json')
        self.crawler_history_file = os.path.join(base_path, '_internal', 'crawler_history.json')

        # Make sure the _internal directory exists.
        os.makedirs(os.path.dirname(self.data_file), exist_ok=True)

        self.daily_limit = 1000  # images per 24-hour window
        self.count = 0
        self.last_reset = datetime.now()
        self.current_ip = self.get_ip()
        self.valid = False  # set by check_ip()
        self.check_ip()

    def get_ip(self):
        """Return an MD5 digest of the local IPv4 address.

        NOTE(review): gethostbyname(hostname) can resolve to 127.0.0.1 on
        some systems, which would make this check trivially stable — confirm.
        """
        hostname = socket.gethostname()
        ip = socket.gethostbyname(hostname)
        return hashlib.md5(ip.encode()).hexdigest()

    def check_ip(self):
        """Validate the current IP against the first-recorded one.

        Sets ``self.valid`` and, on first run, records the IP and initial
        usage counters to disk.
        """
        try:
            if os.path.exists(self.data_file):
                with open(self.data_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                    # A different IP invalidates this installation.
                    if data.get('original_ip') != self.current_ip:
                        self.valid = False
                        return
                    # Same IP: restore the persisted usage numbers.
                    self.load_usage_data(data)
                    self.valid = True
            else:
                # First run: record the IP and the initial usage state.
                self.save_data({
                    'original_ip': self.current_ip,
                    'count': self.count,
                    'last_reset': self.last_reset.strftime('%Y-%m-%d %H:%M:%S')
                })
                # Also create the crawler history record.
                with open(self.crawler_history_file, 'w', encoding='utf-8') as f:
                    json.dump({
                        'count': self.count,
                        'last_reset': self.last_reset.strftime('%Y-%m-%d %H:%M:%S')
                    }, f)
                self.valid = True
        except Exception as e:
            print(f"检查IP时出错: {e}")
            self.valid = False

    def load_usage_data(self, data):
        """Restore count/window-start from a loaded record; reset on staleness
        (>= 1 day old) or any parse error."""
        try:
            last_reset = datetime.strptime(data['last_reset'], '%Y-%m-%d %H:%M:%S')
            if (datetime.now() - last_reset).days >= 1:
                self.reset_counter()
            else:
                self.count = data['count']
                self.last_reset = last_reset
        except Exception as e:
            print(f"加载使用数据出错: {e}")
            self.reset_counter()

    def save_data(self, data=None):
        """Persist the protection record (and mirror the counters into the
        crawler history file). *data* defaults to the current state."""
        if data is None:
            data = {
                'original_ip': self.current_ip,
                'count': self.count,
                'last_reset': self.last_reset.strftime('%Y-%m-%d %H:%M:%S')
            }
        with open(self.data_file, 'w', encoding='utf-8') as f:
            json.dump(data, f)

        # Keep the crawler history record in sync.
        with open(self.crawler_history_file, 'w', encoding='utf-8') as f:
            json.dump({
                'count': self.count,
                'last_reset': self.last_reset.strftime('%Y-%m-%d %H:%M:%S')
            }, f)

    def reset_counter(self):
        """Zero the counter, start a new window, and persist."""
        self.count = 0
        self.last_reset = datetime.now()
        self.save_data()

    def increment(self, count):
        """Add *count* to the usage counter.

        Returns False when the install is invalid or the amount would exceed
        the daily limit; otherwise updates, persists, and returns True.
        """
        if not self.valid:
            return False
        if self.count + count > self.daily_limit:
            return False
        self.count += count
        self.save_data()
        return True

    def get_remaining(self):
        """Return today's remaining quota, clamped to >= 0 (0 when invalid).

        The clamp keeps this consistent with DailyCounter.get_remaining and
        protects against an over-limit count loaded from disk.
        """
        if not self.valid:
            return 0
        return max(0, self.daily_limit - self.count)

# Create the IP-protection instance (rebinds daily_counter from above).
# NOTE(review): runs at import time, so importing this module performs
# file I/O and may create an _internal directory — confirm intended.
daily_counter = IPProtection()

class BaiduImageDownloader:
    """Asynchronously downloads Baidu image-search results for a keyword.

    Images are saved as ``<n>.jpg`` under ``<save_path>/<keyword>_图片`` and a
    CSV of (title, url) rows is written to ``<save_path>/<keyword>_图片信息.csv``.
    """

    def __init__(self, keyword, save_path, image_count):
        if getattr(sys, 'frozen', False):
            # Running as a bundled executable: use the exe's directory.
            base_path = os.path.dirname(sys.executable)
        else:
            # Running as a plain Python script.
            base_path = os.path.abspath(".")

        self.crawler_history_file = os.path.join(base_path, '_internal', 'crawler_history.json')
        self.keyword = keyword
        self.save_path = save_path
        self.image_count = int(image_count)
        self.current_count = 0  # images accepted so far, shared by all page tasks

        # Create the image output directory.
        self.image_path = os.path.join(save_path, f'{keyword}_图片')
        if not os.path.exists(self.image_path):
            os.makedirs(self.image_path)

        # Create the CSV with its header row (utf-8-sig so Excel opens it cleanly).
        self.csv_path = os.path.join(save_path, f'{keyword}_图片信息.csv')
        with open(self.csv_path, 'w', encoding='utf-8-sig') as f:
            f.write('标题,图片链接\n')

    async def download_images(self):
        """Fetch result pages concurrently; return True on success, False on error."""
        async with aiohttp.ClientSession(
            headers={
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }
        ) as session:
            try:
                # Each result page yields up to 30 entries; cap at 10 pages.
                # (The unused keyword-encoding local from the original was removed;
                # process_page encodes the keyword itself.)
                pages_needed = (self.image_count + 29) // 30
                max_pages = min(pages_needed, 10)

                tasks = []
                for page in range(max_pages):
                    if self.current_count >= self.image_count:
                        break
                    tasks.append(self.process_page(session, page))

                await asyncio.gather(*tasks)
                return True

            except Exception as e:
                print(f"下载过程出错: {e}")
                return False

    async def process_page(self, session, page):
        """Fetch one 30-entry JSON result page and schedule its image downloads."""
        try:
            pn = page * 30  # result offset
            gsm = hex(pn)[2:]  # Baidu's acjson API repeats the offset in hex
            encoded_keyword = quote(str(self.keyword))

            url = (
                'https://image.baidu.com/search/acjson?'
                'tn=resultjson_com&logid=&ipn=rj&ct=201326592&is=&fp=result'
                f'&fr=&word={encoded_keyword}&queryWord={encoded_keyword}&cl=2&lm=-1'
                f'&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&hd=&latest=&copyright='
                f'&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&expermode='
                f'&nojc=&isAsync=&pn={pn}&rn=30&gsm={gsm}'
            )

            async with session.get(url, timeout=10) as response:
                if response.status != 200:
                    return

                # content_type=None: the API may not declare application/json.
                data = await response.json(content_type=None)
                download_tasks = []

                for item in data.get('data', []):
                    if not item or self.current_count >= self.image_count:
                        break

                    image_url = item.get('hoverURL', '')
                    if not image_url:
                        continue

                    if not image_url.startswith('http'):
                        image_url = 'https://' + image_url

                    self.current_count += 1

                    # Append this image's metadata row to the CSV.
                    title = item.get('fromPageTitle', '').replace(',', ' ')
                    async with aiofiles.open(self.csv_path, 'a', encoding='utf-8-sig') as f:
                        await f.write(f'{title},{image_url}\n')

                    download_tasks.append(self.save_image(session, image_url, self.current_count))

                    if self.current_count >= self.image_count:
                        break

                if download_tasks:
                    await asyncio.gather(*download_tasks)

        except Exception as e:
            print(f"处理页面时出错: {e}")

    async def save_image(self, session, url, count):
        """Download one image and write it to ``<count>.jpg`` in the output dir."""
        try:
            async with session.get(url, timeout=10) as response:
                if response.status == 200:
                    image_file = os.path.join(self.image_path, f'{count}.jpg')
                    async with aiofiles.open(image_file, 'wb') as f:
                        await f.write(await response.read())
                    print(f'图片保存成功: {count}.jpg')
                else:
                    print(f'下载失败 ({count}): HTTP {response.status}')
        except Exception as e:
            print(f"下载图片时出错 ({count}): {e}")

class CrawlerGUI:
    """Tkinter front-end: collects keyword, count, and save path, enforces the
    daily quota via the module-level ``daily_counter``, and runs the
    asynchronous downloader."""

    def __init__(self):
        """Build the main window and all widgets."""
        self.root = tk.Tk()
        self.root.title("百度图片爬虫 - 异步版本")
        self.root.geometry("610x610")

        # Configure the ttk theme.
        style = ttk.Style()
        style.theme_use('clam')
        self.root.configure(bg='#f0f0f0')

        # Main container frame.
        main_frame = ttk.Frame(self.root, padding="20")
        main_frame.pack(fill=tk.BOTH, expand=True)

        # Title label.
        title_label = ttk.Label(
            main_frame,
            text="百度图片爬取工具",
            font=('微软雅黑', 16, 'bold')
        )
        title_label.pack(pady=20)

        # Keyword input area.
        keyword_frame = ttk.LabelFrame(main_frame, text="搜索设置", padding="10")
        keyword_frame.pack(fill=tk.X, pady=10)

        # Keyword entry row.
        keyword_container = ttk.Frame(keyword_frame)
        keyword_container.pack(fill=tk.X, pady=5)

        ttk.Label(keyword_container, text="请输入关键词：").pack(side=tk.LEFT)
        self.keyword_entry = ttk.Entry(keyword_container, width=50)
        self.keyword_entry.pack(side=tk.LEFT, padx=5, expand=True, fill=tk.X)

        # Image-count selector row.
        count_container = ttk.Frame(keyword_frame)
        count_container.pack(fill=tk.X, pady=5)

        ttk.Label(count_container, text="爬取数量：").pack(side=tk.LEFT)
        self.count_var = tk.StringVar(value="30")  # default: 30 images
        self.count_entry = ttk.Spinbox(
            count_container,
            from_=1,
            to=300,
            textvariable=self.count_var,
            width=10,
            font=('微软雅黑', 10)
        )
        self.count_entry.pack(side=tk.LEFT, padx=5)
        ttk.Label(count_container, text="张（最多300张）").pack(side=tk.LEFT)

        # Save-location area.
        save_frame = ttk.LabelFrame(main_frame, text="保存设置", padding="10")
        save_frame.pack(fill=tk.X, pady=10)

        # Path-selection row.
        path_container = ttk.Frame(save_frame)
        path_container.pack(fill=tk.X, padx=5)

        ttk.Label(
            path_container,
            text="保存路径：",
            background='white'
        ).pack(side=tk.LEFT)

        self.save_path = tk.StringVar()
        path_entry = ttk.Entry(
            path_container,
            textvariable=self.save_path,
            width=40,
            font=('微软雅黑', 10)
        )
        path_entry.pack(side=tk.LEFT, padx=5, fill=tk.X, expand=True)

        browse_btn = ttk.Button(
            path_container,
            text="选择路径",
            command=self.browse_path
        )
        browse_btn.pack(side=tk.LEFT)

        # Remaining-quota display.
        self.quota_label = ttk.Label(
            main_frame,
            text=f"今日剩余配额：{daily_counter.get_remaining()}张",
            font=('微软雅黑', 10),
            foreground='#666666'
        )
        self.quota_label.pack(pady=5)

        # Usage / copyright notice.
        copyright_text = (
            "使用须知：\n"
            "1. 本工具每24小时最多可爬取1000张图片\n"
            "2. 爬取的图片仅供个人使用，禁止商业用途\n"
            "3. 禁止二次传播和分发\n"
        )
        copyright_label = ttk.Label(
            main_frame,
            text=copyright_text,
            font=('微软雅黑', 9),
            foreground='#999999',
            justify='left'
        )
        copyright_label.pack(pady=10)

        # Start button.
        start_btn = ttk.Button(
            main_frame,
            text="开始爬取",
            command=self.start_crawling,
            style='Accent.TButton'
        )
        start_btn.pack(pady=20)

        # Status line.
        self.status_label = ttk.Label(
            main_frame,
            text="准备就绪",
            font=('微软雅黑', 11),
            foreground='#666666'
        )
        self.status_label.pack(pady=10)

        # Custom accent button style.
        style.configure(
            'Accent.TButton',
            background='#007acc',
            foreground='white',
            padding=10,
            font=('微软雅黑', 10)
        )

    def browse_path(self):
        """Open a directory chooser and store the selection in save_path."""
        directory = filedialog.askdirectory()
        if directory:
            self.save_path.set(directory)

    def start_crawling(self):
        """Validate inputs and quota, then run the async downloader.

        NOTE(review): the quota is consumed (daily_counter.increment) BEFORE
        the download runs, so a failed download still spends quota; and
        run_until_complete blocks the Tk event loop, freezing the UI while
        downloading — confirm both are intended.
        """
        keyword = self.keyword_entry.get()
        save_path = self.save_path.get()
        try:
            image_count = int(self.count_var.get())

            # Refuse to run when the IP check failed.
            if not daily_counter.valid:
                self.status_label.config(
                    text="⚠️ 检测到IP变化，此程序仅允许在首次运行的IP地址上使用！",
                    foreground='#dc3545'
                )
                return

            # Check the remaining daily quota.
            remaining = daily_counter.get_remaining()
            if remaining <= 0:
                self.status_label.config(
                    text="⚠️ 已达到今日爬取上限，请24小时后再试！",
                    foreground='#dc3545'
                )
                return

            if image_count > remaining:
                self.status_label.config(
                    text=f"⚠️ 超出今日剩余配额！当前仅可爬取{remaining}张",
                    foreground='#dc3545'
                )
                return

            if not keyword:
                self.status_label.config(
                    text="⚠️ 请输入关键词！",
                    foreground='#dc3545'
                )
                return

            if not save_path:
                self.status_label.config(
                    text="⚠️ 请选择保存路径！",
                    foreground='#dc3545'
                )
                return

            self.status_label.config(
                text="🔄 正在下载中...",
                foreground='#007acc'
            )
            self.root.update()

            # Consume quota up front.
            if not daily_counter.increment(image_count):
                self.status_label.config(
                    text="⚠️ 操作失败：超出每日限额",
                    foreground='#dc3545'
                )
                return

            # Create a dedicated event loop for the download.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            # Create the downloader and run it to completion (blocking).
            downloader = BaiduImageDownloader(keyword, save_path, image_count)
            success = loop.run_until_complete(downloader.download_images())

            # Refresh the quota display.
            self.quota_label.config(
                text=f"今日剩余配额：{daily_counter.get_remaining()}张"
            )

            if success:
                self.status_label.config(
                    text=f"✅ 下载完成！已保存{image_count}张图片到：{save_path}",
                    foreground='#28a745'
                )
            else:
                self.status_label.config(
                    text="❌ 下载失败，请重试",
                    foreground='#dc3545'
                )

        except ValueError:
            self.status_label.config(
                text="⚠️ 请输入有效的图片数量！",
                foreground='#dc3545'
            )
            return
        except Exception as e:
            self.status_label.config(
                text=f"❌ 发生错误: {str(e)}",
                foreground='#dc3545'
            )
            return

    def run(self):
        """Enter the Tk main loop (blocks until the window is closed)."""
        self.root.mainloop()

class BaiduImageItem(scrapy.Item):
    """Scrapy item carrying either image metadata or raw image bytes.

    The pipeline branches on ``type``: 'info' items carry title/image_url,
    'image' items carry filename/image_content.
    """
    type = scrapy.Field()           # 'info' or 'image' (pipeline branch selector)
    title = scrapy.Field()          # source-page title (used by 'info' items)
    image_url = scrapy.Field()      # direct image URL (used by 'info' items)
    filename = scrapy.Field()       # target filename (used by 'image' items)
    image_content = scrapy.Field()  # raw image bytes (used by 'image' items)
    count = scrapy.Field()          # sequential index of the image

class BaiduSpider(scrapy.Spider):
    """Scrapy spider for Baidu image search.

    Fetches JSON result pages for a keyword, appends (title, url) rows to a
    CSV, and downloads each image to ``<save_path>/<keyword>_图片``.
    """
    name = "baidu"
    allowed_domains = ["image.baidu.com", "baidu.com"]
    # NOTE(review): the pipeline is registered here, but the callbacks below
    # write files directly and never yield Items, so the pipeline appears
    # unused by this spider — confirm.
    custom_settings = {
        'ITEM_PIPELINES': {
            __name__ + '.BaiduImagePipeline': 300
        }
    }

    def __init__(self, keyword=None, save_path=None, image_count=30, *args, **kwargs):
        """Set up output paths and counters.

        NOTE(review): save_path defaults to None, which would crash in
        os.path.join below — confirm callers always pass it. Also, the
        directory/CSV names use the raw ``keyword`` argument, not the
        ``self.keyword`` fallback, so keyword=None yields 'None_图片'.
        """
        super(BaiduSpider, self).__init__(*args, **kwargs)
        self.keyword = keyword if keyword else "二次元"
        self.save_path = save_path
        self.image_count = int(image_count)
        self.current_count = 0  # images saved so far

        # Ensure the image output directory exists.
        self.image_path = os.path.join(save_path, f'{keyword}_图片')
        if not os.path.exists(self.image_path):
            os.makedirs(self.image_path)

        # Create the CSV with a header row if it does not exist yet.
        self.csv_path = os.path.join(save_path, f'{keyword}_图片信息.csv')
        if not os.path.exists(self.csv_path):
            with open(self.csv_path, 'w', encoding='utf-8-sig', newline='') as f:
                f.write('标题,图片链接\n')

    def start_requests(self):
        """Yield one request per 30-result page (at most 10 pages)."""
        encoded_keyword = quote(str(self.keyword))
        url_template = (
            'https://image.baidu.com/search/acjson?'
            'tn=resultjson_com&logid=&ipn=rj&ct=201326592&is=&fp=result'
            '&fr=&word={}&queryWord={}&cl=2&lm=-1&ie=utf-8&oe=utf-8'
            '&adpicid=&st=&z=&ic=&hd=&latest=&copyright=&s=&se=&tab='
            '&width=&height=&face=0&istype=2&qc=&nc=1&expermode=&nojc=&isAsync='
            '&pn={}&rn=30&gsm={}'
        )

        pages_needed = (self.image_count + 29) // 30
        max_pages = min(pages_needed, 10)

        for page in range(max_pages):
            pn = page * 30  # result offset
            gsm = hex(pn)[2:]  # the API repeats the offset in hex
            url = url_template.format(encoded_keyword, encoded_keyword, pn, gsm)
            request = scrapy.Request(
                url=url,
                callback=self.parse_json,
                dont_filter=True,
                errback=self.errback_httpbin,
            )
            yield request

    def parse_json(self, response):
        """Parse one result page: log each entry to the CSV and yield a
        download request per image until image_count is reached."""
        try:
            data = json.loads(response.text)
            for item in data.get('data', []):
                if not item or self.current_count >= self.image_count:
                    return

                image_url = item.get('hoverURL', '')
                if not image_url:
                    continue

                if not image_url.startswith('http'):
                    image_url = 'https://' + image_url

                # Strip commas so the title cannot break the CSV row.
                title = item.get('fromPageTitle', '').replace(',', ' ')

                # Append the metadata row to the CSV.
                with open(self.csv_path, 'a', encoding='utf-8-sig', newline='') as f:
                    f.write(f'{title},{image_url}\n')

                self.current_count += 1
                request = scrapy.Request(
                    url=image_url,
                    callback=self.save_image,
                    errback=self.errback_httpbin,
                    meta={'count': self.current_count},
                    dont_filter=True
                )
                yield request

        except Exception as e:
            self.logger.error(f"解析JSON出错: {e}")

    def save_image(self, response):
        """Write the downloaded image bytes to ``<count>.jpg``."""
        try:
            count = response.meta['count']
            image_file = os.path.join(self.image_path, f'{count}.jpg')

            with open(image_file, 'wb') as f:
                f.write(response.body)
            self.logger.info(f'图片保存成功: {count}.jpg')

        except Exception as e:
            self.logger.error(f"保存图片出错: {e}")

    def errback_httpbin(self, failure):
        """Log any request failure."""
        self.logger.error(f"请求失败: {failure.value}")

class BaiduImagePipeline:
    """Pipeline that persists metadata rows ('info' items) to the spider's
    CSV and raw bytes ('image' items) to the spider's image directory."""

    def process_item(self, item, spider):
        """Dispatch on item['type'] and write to disk; returns the item."""
        kind = item['type']

        if kind == 'info':
            # Append a (title, url) row, creating the header-only CSV first
            # when it does not exist yet.
            csv_path = os.path.join(spider.save_path, f'{spider.keyword}_图片信息.csv')
            if not os.path.exists(csv_path):
                with open(csv_path, 'w', encoding='utf-8-sig', newline='') as f:
                    f.write('标题,图片链接\n')

            with open(csv_path, 'a', encoding='utf-8-sig', newline='') as f:
                # Strip commas so the title cannot break the CSV row.
                title = item['title'].replace(',', ' ')
                f.write(f'{title},{item["image_url"]}\n')

        elif kind == 'image':
            # Dump the downloaded bytes under the spider's image directory.
            image_path = os.path.join(spider.image_path, item['filename'])
            with open(image_path, 'wb') as f:
                f.write(item['image_content'])
            print(f'图片保存成功: {item["filename"]}')

        return item

def get_resource_path(relative_path):
    """Resolve *relative_path* against the application's resource base.

    When running from a PyInstaller bundle the resources live in the
    temporary extraction directory (sys._MEIPASS); for a plain script run
    they are resolved relative to the current working directory.
    """
    frozen = getattr(sys, 'frozen', False)
    base_path = sys._MEIPASS if frozen else os.path.abspath(".")
    return os.path.join(base_path, relative_path)

def run_spider(keyword, save_path, image_count):
    """Run BaiduSpider in-process via CrawlerProcess (blocks until done).

    SAVE_PATH is injected into the project settings; always returns True
    once the reactor has stopped.
    """
    crawler_settings = get_project_settings()
    crawler_settings.set('SAVE_PATH', save_path)
    runner = CrawlerProcess(crawler_settings)
    runner.crawl(BaiduSpider, keyword=keyword, save_path=save_path, image_count=image_count)
    runner.start()
    return True

if __name__ == "__main__":
    # Launch the GUI application (blocks until the window is closed).
    app = CrawlerGUI()
    app.run()
    # NOTE(review): this only runs after the GUI exits and requires a Scrapy
    # project context to succeed; it looks like leftover scaffolding from the
    # Scrapy-based variant — confirm whether it should be removed.
    cmdline.execute('scrapy crawl baidu --loglevel=WARNING'.split())