#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/12/27 22:23
# @Author : George
"""
PPT模板下载爬虫
主页面: https://www.51pptmoban.com/ppt/
下载流程:
1. 主页获取PPT详情页链接 
2. 详情页获取下载页面链接
3. 下载页面获取最终下载链接
4. 下载并解压PPT文件
"""

import os
import time
import random
import zipfile
import logging
from pathlib import Path
import requests
from lxml import etree
import chardet
from typing import List


class GetPPT:
    """Scraper that downloads and extracts PPT templates from 51pptmoban.com.

    Flow: listing page -> detail page -> download page -> zip archive,
    which is then extracted into ``self.output_dir`` and deleted.
    """

    # Characters that are illegal in Windows file names; stripped from titles
    # before they are used as zip file names.
    _ILLEGAL_FILENAME_CHARS = '\\/:*?"<>|'

    def __init__(self):
        # Directory where downloaded archives are saved and extracted.
        self.output_dir = Path("./ppt_swdl")
        self.output_dir.mkdir(exist_ok=True)

        # Request headers.
        # NOTE(review): the Cookie value below looks like it was copied from a
        # pic.netbian.com scraper (cf_clearance / zkhanecookieclassrecord) --
        # verify whether 51pptmoban.com actually needs it.
        self.headers = {
            "Cookie": "cf_clearance=NNFCN4uI76.b4eEN2LHBrVuMf0MHj0BvOX7NXce4LaE-1735309326-1.2.1.1-mZUvjGKK2RAJqZQLxLim1mOEGEIgvaC0AGIEZeMC7Dnwvi0ShWdhNB.YzxmzkYj60nC0iVy1EBcPNlFNwZ6cfX9DMJecwX1jVPU.OFSsNeViFK6fH0L0PPkRZZ82NLlhkCkcQPwBJS2ItFQOiAlAbjfzSgN4MFQcHQsft8vQg9C_YoO4BA8vVFN4Yonu8FoMhNSbqmH5Nkl3hMV0s05aezsm10yTGw.PsSBBFr4Nd1CKOIZpTQlAFl3SQSW4bZ2Dxb91CZqYEBGw9OMHiauAXbj0qNPz1lGo9D4S.cKe0qljRqOwmh8f6099KBL4GiBUYIDCCd50QXLmckCjPJ12NNE4wh09plczEbjFrmDc8M1p5H4V69JzhDPMhf8DIcM12xnyAKvpDf1x3UWymvnoxk2_c_oyZOt.zmzJ3hHMxgn0e4zu9hwXJi1dWxWUO48C; zkhanecookieclassrecord=%2C54%2C",
            # Fixed: Referer previously pointed at pic.netbian.com (a different
            # site); it must match the site being scraped.
            "Referer": "https://www.51pptmoban.com/ppt/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"
        }

        # Site URL configuration. Page 1 has its own URL; pages >= 2 follow
        # the "index_{page}.html" pattern.
        self.base_url = "https://www.51pptmoban.com"
        self.first_url = f"{self.base_url}/ppt/"
        self.other_url = f"{self.base_url}/ppt/index_{{page}}.html"

        # Logging setup.
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)

    def get_html_tree(self, url: str) -> "etree._Element":
        """Fetch *url* and parse the response body into an lxml element tree.

        Network/HTTP errors are logged and re-raised to the caller.
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()
            # chardet can return None for short/ambiguous payloads; fall back
            # to UTF-8 instead of assigning None to response.encoding.
            encoding = chardet.detect(response.content)['encoding']
            response.encoding = encoding or 'utf-8'
            return etree.HTML(response.text)
        except Exception as e:
            self.logger.error(f"获取页面失败: {url}, 错误: {e}")
            raise

    def get_page_urls(self, url: str) -> List[str]:
        """Return absolute detail-page URLs found on one listing page."""
        tree = self.get_html_tree(url)
        # hrefs are site-relative paths; prefix them with the base URL.
        paths = tree.xpath("//div[@class='pptlist']/dl/dd/h2/a/@href")
        return [f"{self.base_url}{path}" for path in paths]

    def download_ppt(self, url: str) -> None:
        """Download one PPT template from its detail page, then unzip it.

        Errors are logged and swallowed so a single bad item does not abort
        the whole crawl.
        """
        try:
            # Detail page: download-page link and the template title.
            tree = self.get_html_tree(url)
            download_path = tree.xpath('//div[@class="ppt_xz"]/ul/li[1]/a/@href')[0]
            title = tree.xpath('//div[@class="title_l"]/h1/text()')[0]
            # The title becomes a file name; strip illegal characters.
            title = "".join(
                c for c in title if c not in self._ILLEGAL_FILENAME_CHARS
            ).strip() or "untitled"

            # Download page: the anchor holds a relative path ("..…");
            # rebuild the absolute zip URL under /e/DownSys/.
            download_page = self.get_html_tree(f"{self.base_url}/{download_path}")
            final_path = download_page.xpath('//div[@class="down"]/a[1]/@href')[0]
            zip_url = f"{self.base_url}/e/DownSys/{final_path.split('..')[1]}"

            # Fetch the archive; fail fast on HTTP errors instead of saving
            # an error page as a .zip.
            response = requests.get(zip_url, headers=self.headers, timeout=30)
            response.raise_for_status()
            zip_path = self.output_dir / f"{title}.zip"
            zip_path.write_bytes(response.content)
            self.logger.info(f"下载成功: {title}")

            self._extract_zip(zip_path)
            zip_path.unlink()  # remove the archive once extracted

        except Exception as e:
            self.logger.error(f"下载失败: {url}, 错误: {e}")

    def _extract_zip(self, zip_path: Path) -> None:
        """Extract *zip_path* into the output dir, repairing GBK file names.

        Entries without the UTF-8 flag were decoded as cp437 by zipfile;
        re-decode those as GBK (the encoding this site uses for member names).
        """
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            for zip_info in zip_ref.infolist():
                if not zip_info.flag_bits & 0x800:  # 0x800 = UTF-8 name flag
                    try:
                        zip_info.filename = zip_info.filename.encode('cp437').decode('gbk')
                    except UnicodeError:
                        # Keep the cp437 name rather than fail the whole item.
                        pass
                zip_ref.extract(zip_info, self.output_dir)

    def process_pages(self, page_count: int) -> None:
        """Crawl listing pages 1..page_count and download every template."""
        for page in range(1, page_count + 1):
            url = self.first_url if page == 1 else self.other_url.format(page=page)
            self.logger.info(f"开始处理第{page}页")

            for ppt_url in self.get_page_urls(url):
                self.download_ppt(ppt_url)
                # Random delay to avoid hammering the server.
                time.sleep(random.uniform(1, 4))

            self.logger.info(f"第{page}页处理完成")

    def clean_files(self) -> None:
        """Delete non-PPT files left in the output directory.

        Directories are skipped: extraction may create them, and
        Path.unlink() on a directory raises.
        """
        for file in self.output_dir.iterdir():
            if file.is_file() and file.suffix.lower() not in ('.ppt', '.pptx'):
                file.unlink()
                self.logger.info(f"删除文件: {file.name}")


def _main() -> None:
    """Crawl the first listing page of templates, then prune leftovers."""
    downloader = GetPPT()
    try:
        downloader.process_pages(1)
        downloader.clean_files()
    except KeyboardInterrupt:
        downloader.logger.info("程序被用户中断")
    except Exception as e:
        downloader.logger.error(f"程序异常退出: {e}")


if __name__ == '__main__':
    _main()
