# NOTE: Everything commented out below (down to the old `main()` guard) is a
# superseded legacy version of this script; the active implementation starts
# after it. Kept for reference only.
# import os
# import pandas as pd
# import requests
# from urllib.parse import urljoin, urlparse
# import time
# from pathlib import Path
# import logging

# # 配置日志
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# logger = logging.getLogger(__name__)

# class WebPageScraper:
#     def __init__(self, input_dir, output_dir="snapshots", delay=1):
#         """
#         初始化网页爬取器
        
#         Args:
#             input_dir (str): 包含Excel文件的输入目录
#             output_dir (str): 保存快照的输出目录
#             delay (int): 请求间隔时间（秒）
#         """
#         self.input_dir = Path(input_dir)
#         self.output_dir = Path(output_dir)
#         self.delay = delay
        
#         # 创建输出目录
#         self.output_dir.mkdir(exist_ok=True)
        
#         # 设置请求头，模拟浏览器
#         self.headers = {
#             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
#         }
        
#         # 设置会话
#         self.session = requests.Session()
#         self.session.headers.update(self.headers)
    
#     def find_xlsx_files(self):
#         """递归查找所有xlsx文件"""
#         xlsx_files = []
#         for root, dirs, files in os.walk(self.input_dir):
#             for file in files:
#                 if file.lower().endswith('.xlsx'):
#                     xlsx_files.append(Path(root) / file)
#         return xlsx_files
    
#     def read_excel_column(self, file_path, column_index=6):
#         """
#         读取Excel文件指定列的数据
        
#         Args:
#             file_path (Path): Excel文件路径
#             column_index (int): 列索引（从1开始，第6列）
            
#         Returns:
#             list: URL列表
#         """
#         try:
#             # 读取Excel文件
#             df = pd.read_excel(file_path)
            
#             # 获取第6列（索引为5）
#             if len(df.columns) >= column_index:
#                 column_data = df.iloc[:, column_index - 1].dropna()
#                 urls = column_data.astype(str).tolist()
                
#                 # 过滤出有效的URL
#                 valid_urls = []
#                 for url in urls:
#                     url = url.strip()
#                     if url and (url.startswith('http://') or url.startswith('https://')):
#                         valid_urls.append(url)
                
#                 return valid_urls
#             else:
#                 logger.warning(f"文件 {file_path} 没有第{column_index}列")
#                 return []
                
#         except Exception as e:
#             logger.error(f"读取Excel文件 {file_path} 时出错: {e}")
#             return []
    
#     def download_webpage(self, url, output_path):
#         """
#         下载网页内容并保存
        
#         Args:
#             url (str): 网页URL
#             output_path (Path): 保存路径
            
#         Returns:
#             bool: 是否成功下载
#         """
#         try:
#             # 增加超时时间，添加更多请求头
#             headers = self.headers.copy()
#             headers.update({
#                 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
#                 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
#                 'Accept-Encoding': 'gzip, deflate, br',
#                 'Connection': 'keep-alive',
#                 'Upgrade-Insecure-Requests': '1',
#             })
            
#             # 先发送HEAD请求检查资源是否存在
#             try:
#                 head_response = self.session.head(url, timeout=15, headers=headers, allow_redirects=True)
#                 logger.info(f"HEAD请求状态码: {head_response.status_code} for {url}")
#             except Exception as e:
#                 logger.warning(f"HEAD请求失败: {e}, 继续尝试GET请求")
            
#             # 发送GET请求
#             response = self.session.get(url, timeout=60, headers=headers, allow_redirects=True)
#             response.raise_for_status()
            
#             # 记录响应信息
#             logger.info(f"响应状态码: {response.status_code}")
#             logger.info(f"内容类型: {response.headers.get('content-type', 'unknown')}")
#             logger.info(f"内容长度: {response.headers.get('content-length', 'unknown')}")
            
#             # 根据内容类型和URL确定文件扩展名
#             content_type = response.headers.get('content-type', '').lower()
#             url_lower = url.lower()
            
#             if 'pdf' in content_type or url_lower.endswith('.pdf'):
#                 extension = '.pdf'
#                 is_binary = True
#             elif 'image' in content_type or any(url_lower.endswith(ext) for ext in ['.jpg', '.jpeg', '.png', '.gif', '.bmp']):
#                 # 根据URL确定图片扩展名
#                 for ext in ['.jpg', '.jpeg', '.png', '.gif', '.bmp']:
#                     if url_lower.endswith(ext):
#                         extension = ext
#                         break
#                 else:
#                     extension = '.jpg'  # 默认
#                 is_binary = True
#             elif 'html' in content_type:
#                 extension = '.html'
#                 is_binary = False
#             elif 'json' in content_type:
#                 extension = '.json'
#                 is_binary = False
#             elif 'xml' in content_type:
#                 extension = '.xml'
#                 is_binary = False
#             elif 'text' in content_type:
#                 extension = '.txt'
#                 is_binary = False
#             elif url_lower.endswith('.doc'):
#                 extension = '.doc'
#                 is_binary = True
#             elif url_lower.endswith('.docx'):
#                 extension = '.docx'
#                 is_binary = True
#             elif url_lower.endswith('.xls'):
#                 extension = '.xls'
#                 is_binary = True
#             elif url_lower.endswith('.xlsx'):
#                 extension = '.xlsx'
#                 is_binary = True
#             else:
#                 extension = '.html'  # 默认为html
#                 is_binary = False
            
#             # 添加扩展名到输出路径
#             final_output_path = output_path.with_suffix(extension)
            
#             # 根据文件类型保存内容
#             if is_binary:
#                 with open(final_output_path, 'wb') as f:
#                     f.write(response.content)
#             else:
#                 # 尝试检测编码
#                 encoding = response.encoding if response.encoding else 'utf-8'
#                 try:
#                     content = response.content.decode(encoding)
#                 except UnicodeDecodeError:
#                     # 如果解码失败，尝试其他编码
#                     for enc in ['utf-8', 'gbk', 'gb2312', 'latin1']:
#                         try:
#                             content = response.content.decode(enc)
#                             break
#                         except UnicodeDecodeError:
#                             continue
#                     else:
#                         # 都失败了，使用错误替换
#                         content = response.content.decode('utf-8', errors='replace')
                
#                 with open(final_output_path, 'w', encoding='utf-8') as f:
#                     f.write(content)
            
#             logger.info(f"成功下载: {url} -> {final_output_path} ({len(response.content)} bytes)")
#             return True
            
#         except requests.exceptions.Timeout as e:
#             logger.error(f"下载超时 {url}: {e}")
#             return False
#         except requests.exceptions.HTTPError as e:
#             logger.error(f"HTTP错误 {url}: {e} (状态码: {response.status_code if 'response' in locals() else 'unknown'})")
#             return False
#         except requests.exceptions.ConnectionError as e:
#             logger.error(f"连接错误 {url}: {e}")
#             return False
#         except requests.exceptions.RequestException as e:
#             logger.error(f"请求错误 {url}: {e}")
#             return False
#         except Exception as e:
#             logger.error(f"保存 {url} 快照时出错: {e}")
#             return False
    
#     def process_excel_file(self, excel_path):
#         """
#         处理单个Excel文件
        
#         Args:
#             excel_path (Path): Excel文件路径
#         """
#         logger.info(f"处理Excel文件: {excel_path}")
        
#         # 获取相对于输入目录的路径
#         relative_path = excel_path.relative_to(self.input_dir)
        
#         # 构建对应的输出目录结构
#         output_subdir = self.output_dir / relative_path.parent
#         output_subdir.mkdir(parents=True, exist_ok=True)
        
#         # 获取文件名（不含扩展名）
#         base_name = excel_path.stem
        
#         # 读取第6列的URL
#         urls = self.read_excel_column(excel_path)
        
#         if not urls:
#             logger.warning(f"文件 {excel_path} 中没有找到有效的URL")
#             return
        
#         logger.info(f"在 {excel_path} 中找到 {len(urls)} 个URL")
        
#         # 下载每个URL的快照
#         for index, url in enumerate(urls, 1):
#             # 构造输出文件名，保持目录结构
#             snapshot_name = f"{base_name}-{index}"
#             output_path = output_subdir / snapshot_name
            
#             logger.info(f"正在下载第 {index} 个URL: {url}")
#             logger.info(f"保存路径: {output_path}")
            
#             # 下载网页快照
#             success = self.download_webpage(url, output_path)
            
#             if not success:
#                 logger.warning(f"跳过失败的URL: {url}")
            
#             # 添加延迟，避免请求过于频繁
#             time.sleep(self.delay)
    
#     def run(self):
#         """运行主程序"""
#         logger.info(f"开始扫描目录: {self.input_dir}")
        
#         # 查找所有xlsx文件
#         xlsx_files = self.find_xlsx_files()
        
#         if not xlsx_files:
#             logger.warning(f"在目录 {self.input_dir} 中没有找到xlsx文件")
#             return
        
#         logger.info(f"找到 {len(xlsx_files)} 个xlsx文件")
        
#         # 处理每个Excel文件
#         for excel_file in xlsx_files:
#             try:
#                 self.process_excel_file(excel_file)
#             except Exception as e:
#                 logger.error(f"处理文件 {excel_file} 时出现错误: {e}")
#                 continue
        
#         logger.info("所有文件处理完成！")

# def main():
#     """主函数"""
#     # 配置参数
#     input_directory = input("请输入包含Excel文件的目录路径: ").strip()
    
#     if not input_directory:
#         input_directory = "."  # 默认当前目录
    
#     # 检查目录是否存在
#     if not os.path.exists(input_directory):
#         print(f"错误：目录 {input_directory} 不存在")
#         return
    
#     # 询问输出目录
#     output_directory = input("请输入快照保存目录（默认为'snapshots'）: ").strip()
#     if not output_directory:
#         output_directory = "snapshots"
    
#     # 询问请求延迟
#     try:
#         delay = float(input("请输入请求间隔时间（秒，默认为0.5）: ") or "0.5")
#     except ValueError:
#         delay = 0.5

#     print(f"\n配置信息:")
#     print(f"输入目录: {input_directory}")
#     print(f"输出目录: {output_directory}")
#     print(f"请求延迟: {delay}秒")
#     print(f"将读取Excel文件的第6列作为URL源")
    
#     confirm = input("\n是否开始处理？(y/N): ").strip().lower()
#     if confirm != 'y':
#         print("操作已取消")
#         return
    
#     # 创建爬虫实例并运行
#     scraper = WebPageScraper(input_directory, output_directory, delay)
#     scraper.run()

# if __name__ == "__main__":
#     main()

import os
import pandas as pd
import requests
from urllib.parse import urljoin, urlparse
import time
from pathlib import Path
import logging

# Configure logging: INFO level, timestamped lines shared by the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class WebPageScraper:
    """Download snapshots of the URLs listed in column 6 of Excel files.

    Recursively scans ``input_dir`` for ``.xlsx`` files, reads the http(s)
    URLs found in the 6th column of each, and saves every page/document
    under ``output_dir`` while mirroring the input directory layout.
    Snapshots that already exist from a previous run are skipped, so the
    scraper is resumable.
    """

    # Extensions probed when checking whether a snapshot already exists;
    # must cover everything _classify_content() can produce.
    _KNOWN_EXTENSIONS = (
        '.html', '.pdf', '.jpg', '.jpeg', '.png', '.gif', '.bmp',
        '.json', '.xml', '.txt', '.doc', '.docx', '.xls', '.xlsx',
    )

    def __init__(self, input_dir, output_dir="snapshots", delay=1):
        """
        Initialize the scraper.

        Args:
            input_dir (str): directory containing the Excel files.
            output_dir (str): directory where snapshots are written.
            delay (int | float): pause between requests, in seconds.
        """
        self.input_dir = Path(input_dir)
        self.output_dir = Path(output_dir)
        self.delay = delay

        # parents=True so a nested output path such as "out/snapshots"
        # works too (exist_ok alone raises FileNotFoundError for missing
        # parent directories).
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Browser-like User-Agent so plain servers don't reject us outright.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # One session so connections and cookies are reused across URLs.
        self.session = requests.Session()
        self.session.headers.update(self.headers)

    def find_xlsx_files(self):
        """Return every .xlsx file under input_dir (recursive, case-insensitive)."""
        return [
            path for path in self.input_dir.rglob('*')
            if path.is_file() and path.name.lower().endswith('.xlsx')
        ]

    def read_excel_column(self, file_path, column_index=6):
        """
        Read the URL column of one Excel file.

        Args:
            file_path (Path): path to the Excel file.
            column_index (int): 1-based column number (default: column 6).

        Returns:
            list[str]: the absolute http(s) URLs in that column; [] when the
            column is missing or the file cannot be read.
        """
        try:
            df = pd.read_excel(file_path)

            if len(df.columns) < column_index:
                logger.warning(f"文件 {file_path} 没有第{column_index}列")
                return []

            # Drop blanks, stringify, and keep only absolute http(s) URLs.
            cells = df.iloc[:, column_index - 1].dropna().astype(str)
            return [
                url.strip() for url in cells
                if url.strip().startswith(('http://', 'https://'))
            ]
        except Exception as e:
            logger.error(f"读取Excel文件 {file_path} 时出错: {e}")
            return []

    def check_file_exists(self, output_path):
        """
        Check whether a snapshot for *output_path* was already downloaded.

        Args:
            output_path (Path): base output path without extension.

        Returns:
            tuple: (True, existing Path) when a file with any known
            extension exists, otherwise (False, None).
        """
        for ext in self._KNOWN_EXTENSIONS:
            # Append the extension instead of with_suffix(): with_suffix()
            # would clobber anything after a dot already present in the
            # base name (e.g. "report.v2-1" -> "report.html").
            candidate = output_path.parent / (output_path.name + ext)
            if candidate.exists():
                return True, candidate
        return False, None

    @staticmethod
    def _classify_content(content_type, url):
        """Map a response's content-type / URL to (file extension, is_binary)."""
        content_type = content_type.lower()
        url_lower = url.lower()

        if 'pdf' in content_type or url_lower.endswith('.pdf'):
            return '.pdf', True

        image_exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp')
        if 'image' in content_type or url_lower.endswith(image_exts):
            for ext in image_exts:
                if url_lower.endswith(ext):
                    return ext, True
            return '.jpg', True  # image content-type, unrecognized URL: default .jpg

        if 'html' in content_type:
            return '.html', False
        if 'json' in content_type:
            return '.json', False
        if 'xml' in content_type:
            return '.xml', False
        if 'text' in content_type:
            return '.txt', False

        # Office documents are usually served with opaque content types;
        # fall back to the URL's own extension.
        for ext in ('.doc', '.docx', '.xls', '.xlsx'):
            if url_lower.endswith(ext):
                return ext, True

        return '.html', False  # default: treat as HTML text

    @staticmethod
    def _decode_text(response):
        """Decode a text response body, falling back through common encodings."""
        encoding = response.encoding if response.encoding else 'utf-8'
        try:
            return response.content.decode(encoding)
        except UnicodeDecodeError:
            # Chinese sites frequently mislabel gbk/gb2312 content.
            for enc in ('utf-8', 'gbk', 'gb2312', 'latin1'):
                try:
                    return response.content.decode(enc)
                except UnicodeDecodeError:
                    continue
            # Last resort: keep going with replacement characters.
            return response.content.decode('utf-8', errors='replace')

    def download_webpage(self, url, output_path):
        """
        Download one URL and save it next to *output_path*.

        Fails fast: a 10-second request timeout plus, on Unix, a
        SIGALRM-based 20-second budget for writing the file to disk
        (Windows has no SIGALRM, so the elapsed time is checked after
        the write instead).

        Args:
            url (str): the URL to fetch.
            output_path (Path): output path WITHOUT extension; the real
                extension is chosen from the response's content type.

        Returns:
            bool: True when the snapshot was written, False when skipped.
        """
        import signal

        def timeout_handler(signum, frame):
            raise TimeoutError("保存操作超时")

        # Bound before the request so the except-blocks can clean up safely.
        final_output_path = None
        try:
            headers = self.headers.copy()
            # NOTE(review): advertising 'br' assumes brotli support is
            # installed alongside requests — confirm, or responses may
            # arrive still compressed.
            headers.update({
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive',
                'Upgrade-Insecure-Requests': '1',
            })

            # Single GET with a short timeout so dead hosts are skipped fast.
            response = self.session.get(url, timeout=10, headers=headers, allow_redirects=True)
            response.raise_for_status()

            extension, is_binary = self._classify_content(
                response.headers.get('content-type', ''), url)

            # Append (not with_suffix) so dots in the base name survive.
            final_output_path = output_path.parent / (output_path.name + extension)

            # NOTE(review): signal-based timeouts only work in the main
            # thread of the main interpreter — this assumes single-threaded
            # use.
            if os.name != 'nt':  # Unix: interrupt a hung write via SIGALRM
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(20)  # 20-second save budget

            try:
                save_start_time = time.time()

                if is_binary:
                    with open(final_output_path, 'wb') as f:
                        f.write(response.content)
                else:
                    with open(final_output_path, 'w', encoding='utf-8') as f:
                        f.write(self._decode_text(response))

                save_time = time.time() - save_start_time

                # Windows fallback: the write already finished, but a
                # pathologically slow save is still discarded and skipped.
                if os.name == 'nt' and save_time > 20:
                    logger.warning(f"✗ 保存超时跳过: {url} (耗时 {save_time:.1f}秒)")
                    if final_output_path.exists():
                        final_output_path.unlink()
                    return False

                logger.info(f"✓ 成功下载: {url} -> {final_output_path} ({len(response.content)} bytes, 保存耗时 {save_time:.1f}秒)")
                return True

            finally:
                if os.name != 'nt':
                    signal.alarm(0)  # cancel any pending alarm

        except TimeoutError:
            logger.warning(f"✗ 保存超时跳过: {url} (超过20秒)")
            # Best effort: remove the partially written file.
            try:
                if final_output_path is not None and final_output_path.exists():
                    final_output_path.unlink()
            except OSError:
                pass
            return False
        except requests.exceptions.Timeout:
            logger.warning(f"✗ 下载超时跳过: {url}")
            return False
        except requests.exceptions.HTTPError:
            logger.warning(f"✗ HTTP错误跳过: {url} (状态码: {response.status_code if 'response' in locals() else 'unknown'})")
            return False
        except requests.exceptions.ConnectionError:
            logger.warning(f"✗ 连接失败跳过: {url}")
            return False
        except requests.exceptions.RequestException:
            logger.warning(f"✗ 请求失败跳过: {url}")
            return False
        except Exception as e:
            logger.warning(f"✗ 保存失败跳过: {url} - {str(e)[:50]}")
            return False

    def process_excel_file(self, excel_path):
        """
        Download every URL found in one Excel file.

        Snapshots are written to output_dir, mirroring the file's position
        under input_dir, and named "<excel stem>-<row index>.<ext>".

        Args:
            excel_path (Path): path to the .xlsx file.
        """
        logger.info(f"处理Excel文件: {excel_path}")

        # Mirror the input directory layout inside the output directory.
        relative_path = excel_path.relative_to(self.input_dir)
        output_subdir = self.output_dir / relative_path.parent
        output_subdir.mkdir(parents=True, exist_ok=True)

        base_name = excel_path.stem

        urls = self.read_excel_column(excel_path)
        if not urls:
            logger.warning(f"文件 {excel_path} 中没有找到有效的URL")
            return

        logger.info(f"在 {excel_path} 中找到 {len(urls)} 个URL")

        success_count = 0
        skip_count = 0
        total_count = len(urls)

        for index, url in enumerate(urls, 1):
            output_path = output_subdir / f"{base_name}-{index}"

            # Resumability: never re-download an existing snapshot.
            file_exists, existing_file = self.check_file_exists(output_path)
            if file_exists:
                logger.info(f"[{index}/{total_count}] ⏭ 跳过已存在: {url} -> {existing_file.name}")
                skip_count += 1
                continue

            logger.info(f"[{index}/{total_count}] 下载: {url}")

            if self.download_webpage(url, output_path):
                success_count += 1

            # Be polite: pause between requests.
            if self.delay > 0:
                time.sleep(self.delay)

        logger.info(f"文件 {excel_path.name} 处理完成: 成功 {success_count}, 跳过 {skip_count}, 失败 {total_count-success_count-skip_count}, 总计 {total_count}")

    def run(self):
        """Scan input_dir for .xlsx files and process each one in turn."""
        logger.info(f"开始扫描目录: {self.input_dir}")

        xlsx_files = self.find_xlsx_files()
        if not xlsx_files:
            logger.warning(f"在目录 {self.input_dir} 中没有找到xlsx文件")
            return

        logger.info(f"找到 {len(xlsx_files)} 个xlsx文件")

        total_files = len(xlsx_files)
        processed_files = 0

        # One broken file must not abort the whole run.
        for file_index, excel_file in enumerate(xlsx_files, 1):
            try:
                logger.info(f"=== 处理文件 {file_index}/{total_files}: {excel_file.name} ===")
                self.process_excel_file(excel_file)
                processed_files += 1
            except Exception as e:
                logger.error(f"处理文件 {excel_file} 时出现错误: {e}")
                continue

        logger.info(f"=== 全部处理完成！成功处理 {processed_files}/{total_files} 个Excel文件 ===")

def main():
    """Interactive entry point: gather settings, confirm, and run the scraper."""
    # Input directory (defaults to the current directory).
    input_directory = input("请输入包含Excel文件的目录路径: ").strip()
    if not input_directory:
        input_directory = "."

    # Must be an actual directory, not merely an existing path: the
    # original os.path.exists() check let a plain file through, which
    # then produced a silently empty run.
    if not os.path.isdir(input_directory):
        print(f"错误：目录 {input_directory} 不存在")
        return

    # Output directory (defaults to 'snapshots').
    output_directory = input("请输入快照保存目录（默认为'snapshots'）: ").strip()
    if not output_directory:
        output_directory = "snapshots"

    # Delay between requests; any unparsable input falls back to 0.2s.
    try:
        delay = float(input("请输入请求间隔时间（秒，默认为0.2）: ") or "0.2")
    except ValueError:
        delay = 0.2

    # Echo the configuration before asking for confirmation.
    print(f"\n配置信息:")
    print(f"输入目录: {input_directory}")
    print(f"输出目录: {output_directory}")
    print(f"请求延迟: {delay}秒")
    print(f"将读取Excel文件的第6列作为URL源")
    print(f"下载超时时间: 10秒")
    print(f"保存超时时间: 20秒（超时自动跳过）")
    print(f"智能跳过: 已存在的文件将自动跳过")

    confirm = input("\n是否开始处理？(y/N): ").strip().lower()
    if confirm != 'y':
        print("操作已取消")
        return

    # Build the scraper and process everything.
    scraper = WebPageScraper(input_directory, output_directory, delay)
    scraper.run()

# Script entry point.
if __name__ == "__main__":
    main()