# import requests
# from bs4 import BeautifulSoup
# import PyPDF2
# import docx
# import re
# from urllib.parse import urlparse
# import os
# from io import BytesIO
# import html2text
# import chardet
# from readability import readability
# import justext
# import trafilatura
# from typing import Optional, Union

# class WebContentScraper:
#     """
#     网页内容智能爬取工具
#     支持HTML网页、PDF、Word等多种格式的正文提取
#     """
    
#     def __init__(self):
#         self.headers = {
#             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
#         }
#         self.timeout = 30
        
#     def scrape(self, url: str) -> Optional[str]:
#         """
#         主入口：根据URL爬取内容
        
#         Args:
#             url: 目标URL
            
#         Returns:
#             提取的正文内容，失败返回None
#         """
#         try:
#             # 发送请求
#             response = requests.get(url, headers=self.headers, timeout=self.timeout)
#             response.raise_for_status()
            
#             # 获取Content-Type
#             content_type = response.headers.get('Content-Type', '').lower()
            
#             # 根据内容类型处理
#             if 'application/pdf' in content_type:
#                 return self._extract_pdf_content(response.content)
#             elif 'text/html' in content_type:
#                 return self._extract_html_content(response.text, url)
#             elif 'text/plain' in content_type:
#                 return self._extract_text_content(response.content)
#             elif 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' in content_type:
#                 return self._extract_docx_content(response.content)
#             else:
#                 # 尝试从URL扩展名判断
#                 parsed_url = urlparse(url)
#                 path = parsed_url.path.lower()
                
#                 if path.endswith('.pdf'):
#                     return self._extract_pdf_content(response.content)
#                 elif path.endswith('.docx'):
#                     return self._extract_docx_content(response.content)
#                 elif path.endswith('.txt'):
#                     return self._extract_text_content(response.content)
#                 else:
#                     # 默认按HTML处理
#                     return self._extract_html_content(response.text, url)
                    
#         except Exception as e:
#             print(f"爬取失败: {e}")
#             return None
    
#     def _extract_html_content(self, html: str, url: str) -> str:
#         """
#         从HTML中提取正文内容，使用多种算法综合判断
#         """
#         # 方法1：使用trafilatura（效果最好的库之一）
#         try:
#             content = trafilatura.extract(html, include_comments=False, 
#                                         include_tables=True, no_fallback=False,
#                                         url=url)
#             if content and len(content) > 100:
#                 return content
#         except:
#             pass
        
#         # 方法2：使用readability
#         try:
#             doc = readability(html, url)
#             summary = doc.summary()
#             soup = BeautifulSoup(summary, 'html.parser')
#             content = soup.get_text(strip=True)
#             if content and len(content) > 100:
#                 return self._clean_text(content)
#         except:
#             pass
        
#         # 方法3：使用justext
#         try:
#             paragraphs = justext.justext(html, justext.get_stoplist("Chinese"))
#             content = '\n\n'.join([p.text for p in paragraphs if not p.is_boilerplate])
#             if content and len(content) > 100:
#                 return content
#         except:
#             pass
        
#         # 方法4：基础BeautifulSoup提取
#         return self._basic_html_extract(html)
    
#     def _basic_html_extract(self, html: str) -> str:
#         """
#         基础HTML内容提取方法
#         """
#         soup = BeautifulSoup(html, 'html.parser')
        
#         # 移除脚本和样式
#         for script in soup(["script", "style", "noscript"]):
#             script.decompose()
        
#         # 移除广告相关标签
#         ad_keywords = ['ad', 'ads', 'advertisement', 'sponsor', 'promo', 'banner']
#         for keyword in ad_keywords:
#             for tag in soup.find_all(class_=re.compile(keyword, re.I)):
#                 tag.decompose()
#             for tag in soup.find_all(id=re.compile(keyword, re.I)):
#                 tag.decompose()
        
#         # 提取主要内容区域
#         main_content = None
#         for tag in ['main', 'article', 'section']:
#             main_content = soup.find(tag)
#             if main_content:
#                 break
        
#         if not main_content:
#             # 查找包含最多段落的div
#             divs = soup.find_all('div')
#             max_p_count = 0
#             for div in divs:
#                 p_count = len(div.find_all('p'))
#                 if p_count > max_p_count:
#                     max_p_count = p_count
#                     main_content = div
        
#         if main_content:
#             text = main_content.get_text(strip=True)
#         else:
#             text = soup.get_text(strip=True)
        
#         return self._clean_text(text)
    
#     def _extract_pdf_content(self, pdf_bytes: bytes) -> str:
#         """
#         从PDF文件中提取文本内容
#         """
#         try:
#             pdf_file = BytesIO(pdf_bytes)
#             pdf_reader = PyPDF2.PdfReader(pdf_file)
#             text = ""
            
#             for page_num in range(len(pdf_reader.pages)):
#                 page = pdf_reader.pages[page_num]
#                 text += page.extract_text() + "\n"
            
#             return self._clean_text(text)
#         except Exception as e:
#             print(f"PDF提取失败: {e}")
#             return ""
    
#     def _extract_docx_content(self, docx_bytes: bytes) -> str:
#         """
#         从Word文档中提取文本内容
#         """
#         try:
#             doc_file = BytesIO(docx_bytes)
#             doc = docx.Document(doc_file)
#             text = ""
            
#             for paragraph in doc.paragraphs:
#                 text += paragraph.text + "\n"
            
#             # 提取表格内容
#             for table in doc.tables:
#                 for row in table.rows:
#                     for cell in row.cells:
#                         text += cell.text + "\t"
#                     text += "\n"
            
#             return self._clean_text(text)
#         except Exception as e:
#             print(f"DOCX提取失败: {e}")
#             return ""
    
#     def _extract_text_content(self, content_bytes: bytes) -> str:
#         """
#         提取纯文本内容，自动检测编码
#         """
#         try:
#             # 检测编码
#             detected = chardet.detect(content_bytes)
#             encoding = detected['encoding'] or 'utf-8'
            
#             text = content_bytes.decode(encoding)
#             return self._clean_text(text)
#         except Exception as e:
#             print(f"文本提取失败: {e}")
#             return ""
    
#     def _clean_text(self, text: str) -> str:
#         """
#         清理文本内容
#         """
#         # 移除多余的空白字符
#         text = re.sub(r'\s+', ' ', text)
#         text = re.sub(r'\n\s*\n', '\n\n', text)
        
#         # 移除特殊字符
#         text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]', '', text)
        
#         # 移除过多的换行
#         text = re.sub(r'\n{3,}', '\n\n', text)
        
#         return text.strip()

# # 使用示例
# if __name__ == "__main__":
#     scraper = WebContentScraper()
    
#     # # 测试不同类型的URL
#     # test_urls = [
#     #     "https://example.com/article.html",  # HTML网页
#     #     "https://example.com/document.pdf",  # PDF文件
#     #     "https://example.com/report.docx",   # Word文档
#     #     "https://example.com/readme.txt"     # 纯文本
#     # ]

#     # 测试不同类型的URL
#     test_urls = [
#         # "https://www.runoob.com/html/html-tutorial.html",  # HTML网页
#         # "http://www.ceec.cn/zyzx/sjhjzz/zzlm/tszs/201604/W020180903671259998212.pdf",  # PDF文件
#         # "https://example.com/report.docx",   # Word文档
#         # "https://example.com/readme.txt"     # 纯文本
#         "https://data.worldbank.org.cn/indicator/NY.GDP.MKTP.KD.ZG?locations=CU"
#     ]
    
#     for url in test_urls:
#         print(f"\n爬取: {url}")
#         content = scraper.scrape(url)
#         if content:
#             # print(f"内容预览: {content[:200]}...")
#             print(f"内容预览: {content}")
#         else:
#             print("爬取失败")


# import requests
# from bs4 import BeautifulSoup
# import PyPDF2
# import docx
# import re
# from urllib.parse import urlparse
# import os
# from io import BytesIO
# import html2text
# import chardet
# from readability import readability
# import justext
# import trafilatura
# from typing import Optional, Union, Dict, List
# import json
# import time
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.chrome.options import Options
# from selenium.webdriver.chrome.service import Service
# from selenium.common.exceptions import TimeoutException, NoSuchElementException
# import pandas as pd

# class WebContentScraper:
#     """
#     网页内容智能爬取工具
#     支持HTML网页、PDF、Word等多种格式的正文提取
#     特别优化了动态加载内容和表格数据的提取
#     """
    
#     def __init__(self, use_selenium=False):
#         self.headers = {
#             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
#         }
#         self.timeout = 30
#         self.use_selenium = use_selenium
#         self.driver = None
        
#     def _init_selenium(self):
#         """初始化Selenium WebDriver"""
#         if not self.driver:
#             options = Options()
#             options.add_argument('--headless')  # 无头模式
#             options.add_argument('--no-sandbox')
#             options.add_argument('--disable-dev-shm-usage')
#             options.add_argument('--disable-gpu')
#             # 忽略SSL错误
#             options.add_argument('--ignore-certificate-errors')
#             options.add_argument('--ignore-ssl-errors')
#             # 禁用日志
#             options.add_argument('--log-level=3')
#             options.add_experimental_option('excludeSwitches', ['enable-logging'])
#             # 额外的性能优化
#             options.add_argument('--disable-blink-features=AutomationControlled')
#             options.add_argument('--disable-extensions')
#             options.add_argument('--disable-infobars')
            
#             # 设置Chrome日志级别
#             service = Service(log_path='NUL' if os.name == 'nt' else '/dev/null')
#             self.driver = webdriver.Chrome(options=options, service=service)
        
#     def scrape(self, url: str, extract_tables=True, wait_for_element=None) -> Dict[str, Union[str, List[Dict]]]:
#         """
#         主入口：根据URL爬取内容
        
#         Args:
#             url: 目标URL
#             extract_tables: 是否提取表格数据
#             wait_for_element: 等待特定元素加载（CSS选择器）
            
#         Returns:
#             包含正文内容和表格数据的字典
#         """
#         result = {"content": "", "tables": []}
        
#         try:
#             # 先尝试直接请求
#             response = requests.get(url, headers=self.headers, timeout=self.timeout)
#             response.raise_for_status()
            
#             # 获取Content-Type
#             content_type = response.headers.get('Content-Type', '').lower()
            
#             # 根据内容类型处理
#             if 'application/pdf' in content_type:
#                 result["content"] = self._extract_pdf_content(response.content)
#             elif 'text/html' in content_type:
#                 # 对于HTML，检查是否需要处理动态内容
#                 if self._needs_dynamic_loading(url, response.text):
#                     result = self._extract_dynamic_content(url, extract_tables, wait_for_element)
#                 else:
#                     result["content"] = self._extract_html_content(response.text, url)
#                     if extract_tables:
#                         result["tables"] = self._extract_tables_from_html(response.text)
#             elif 'text/plain' in content_type:
#                 result["content"] = self._extract_text_content(response.content)
#             elif 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' in content_type:
#                 result["content"] = self._extract_docx_content(response.content)
#             else:
#                 # 尝试从URL扩展名判断
#                 parsed_url = urlparse(url)
#                 path = parsed_url.path.lower()
                
#                 if path.endswith('.pdf'):
#                     result["content"] = self._extract_pdf_content(response.content)
#                 elif path.endswith('.docx'):
#                     result["content"] = self._extract_docx_content(response.content)
#                 elif path.endswith('.txt'):
#                     result["content"] = self._extract_text_content(response.content)
#                 else:
#                     # 检查是否需要动态加载
#                     if self._needs_dynamic_loading(url, response.text):
#                         result = self._extract_dynamic_content(url, extract_tables, wait_for_element)
#                     else:
#                         result["content"] = self._extract_html_content(response.text, url)
#                         if extract_tables:
#                             result["tables"] = self._extract_tables_from_html(response.text)
                    
#         except Exception as e:
#             print(f"爬取失败: {e}")
            
#         return result
    
#     def _needs_dynamic_loading(self, url: str, html: str) -> bool:
#         """判断页面是否需要动态加载"""
#         # 检查常见的动态加载迹象
#         dynamic_indicators = [
#             'worldbank.org',
#             'data.worldbank',
#             'vue-app',
#             'react-root',
#             'angular',
#             'ember-application',
#             'window.__INITIAL_STATE__',
#             'window.PRELOADED_STATE',
#             '__NEXT_DATA__'
#         ]
        
#         for indicator in dynamic_indicators:
#             if indicator in url or indicator in html:
#                 return True
        
#         # 检查是否有大量的JavaScript
#         script_count = html.count('<script')
#         if script_count > 10:
#             return True
            
#         return False
    
#     def _extract_dynamic_content(self, url: str, extract_tables=True, wait_for_element=None) -> Dict:
#         """使用Selenium提取动态加载的内容"""
#         self._init_selenium()
#         result = {"content": "", "tables": []}
        
#         try:
#             self.driver.get(url)
            
#             # 等待页面加载
#             if wait_for_element:
#                 WebDriverWait(self.driver, 20).until(
#                     EC.presence_of_element_located((By.CSS_SELECTOR, wait_for_element))
#                 )
#             else:
#                 # 对于世界银行等数据网站，等待表格加载
#                 if 'worldbank' in url:
#                     try:
#                         # 等待图表或表格加载
#                         WebDriverWait(self.driver, 20).until(
#                             EC.presence_of_element_located((By.CSS_SELECTOR, 'table, .chart-container, .data-table, svg'))
#                         )
#                     except:
#                         pass
#                 time.sleep(3)  # 额外等待确保数据加载完成
            
#             # 获取页面源码
#             page_source = self.driver.page_source
            
#             # 提取正文
#             result["content"] = self._extract_html_content(page_source, url)
            
#             # 提取表格数据
#             if extract_tables:
#                 result["tables"] = self._extract_tables_from_selenium()
                
#                 # 特殊处理世界银行数据
#                 if 'worldbank' in url:
#                     world_bank_data = self._extract_worldbank_data()
#                     if world_bank_data:
#                         result["tables"].append(world_bank_data)
                        
#         except Exception as e:
#             print(f"动态内容提取失败: {e}")
            
#         return result
    
#     def _extract_worldbank_data(self) -> Optional[Dict]:
#         """专门提取世界银行数据"""
#         try:
#             # 等待数据完全加载
#             time.sleep(3)
            
#             # 方法1：查找数据表格
#             tables = self.driver.find_elements(By.CSS_SELECTOR, 'table, .table, [role="table"], .data-table')
#             for table in tables:
#                 if table.is_displayed() and table.text.strip():
#                     parsed_table = self._parse_table_element(table)
#                     if parsed_table and parsed_table.get('data'):
#                         return parsed_table
            
#             # 方法2：检查React/Vue数据
#             js_data = self.driver.execute_script("""
#                 // 检查各种可能的数据存储位置
#                 if (window.__REACT_DATA__) return window.__REACT_DATA__;
#                 if (window.__INITIAL_STATE__) return window.__INITIAL_STATE__;
#                 if (window.APP_DATA) return window.APP_DATA;
#                 if (window.pageData) return window.pageData;
                
#                 // 检查组件状态
#                 const reactRoot = document.querySelector('#root, #app, .app-container');
#                 if (reactRoot && reactRoot._reactRootContainer) {
#                     const fiber = reactRoot._reactRootContainer._internalRoot.current;
#                     if (fiber && fiber.memoizedState) {
#                         return fiber.memoizedState;
#                     }
#                 }
                
#                 // 查找包含数据的元素
#                 const dataElements = document.querySelectorAll('[data-year], [data-value], .data-point');
#                 if (dataElements.length > 0) {
#                     const data = [];
#                     dataElements.forEach(el => {
#                         const year = el.getAttribute('data-year') || el.textContent.match(/\d{4}/)?.[0];
#                         const value = el.getAttribute('data-value') || el.textContent.match(/[\d.-]+/)?.[0];
#                         if (year && value) {
#                             data.push({year, value});
#                         }
#                     });
#                     return {type: 'extracted', data};
#                 }
                
#                 return null;
#             """)
            
#             if js_data:
#                 return {"type": "js_data", "data": js_data}
            
#             # 方法3：查找图表数据
#             svg_elements = self.driver.find_elements(By.CSS_SELECTOR, 'svg, .chart, .graph')
#             if svg_elements:
#                 # 尝试提取图表的数据
#                 chart_data = self.driver.execute_script("""
#                     const charts = document.querySelectorAll('.chart-container, .graph-container');
#                     for (const chart of charts) {
#                         const dataPoints = chart.querySelectorAll('.data-point, .bar, .line-point');
#                         if (dataPoints.length > 0) {
#                             const data = [];
#                             dataPoints.forEach(point => {
#                                 const title = point.getAttribute('title') || point.getAttribute('data-original-title');
#                                 if (title) {
#                                     data.push(title);
#                                 }
#                             });
#                             return data;
#                         }
#                     }
#                     return null;
#                 """)
                
#                 if chart_data:
#                     return {"type": "chart_data", "data": chart_data}
            
#             # 方法4：检查API响应
#             api_data = self.driver.execute_script("""
#                 // 检查网络请求的响应
#                 if (performance && performance.getEntriesByType) {
#                     const entries = performance.getEntriesByType('resource');
#                     for (const entry of entries) {
#                         if (entry.name.includes('api') || entry.name.includes('data')) {
#                             // 这里无法直接获取响应内容，但可以知道有API调用
#                             return {type: 'api_detected', url: entry.name};
#                         }
#                     }
#                 }
#                 return null;
#             """)
            
#             if api_data:
#                 print(f"检测到API调用: {api_data}")
                
#         except Exception as e:
#             print(f"世界银行数据提取失败: {e}")
            
#         return None
    
#     def _extract_tables_from_selenium(self) -> List[Dict]:
#         """从Selenium加载的页面中提取表格"""
#         tables = []
#         try:
#             table_elements = self.driver.find_elements(By.TAG_NAME, 'table')
#             for table in table_elements:
#                 table_data = self._parse_table_element(table)
#                 if table_data and table_data.get('data'):
#                     tables.append(table_data)
#         except Exception as e:
#             print(f"表格提取失败: {e}")
#         return tables
    
#     def _parse_table_element(self, table_element) -> Dict:
#         """解析表格元素"""
#         try:
#             # 获取表格标题
#             title = ""
#             caption = table_element.find_elements(By.TAG_NAME, 'caption')
#             if caption:
#                 title = caption[0].text
            
#             # 获取表头
#             headers = []
#             thead = table_element.find_elements(By.TAG_NAME, 'thead')
#             if thead:
#                 th_elements = thead[0].find_elements(By.TAG_NAME, 'th')
#                 headers = [th.text for th in th_elements]
#             else:
#                 # 尝试从第一行获取表头
#                 first_row = table_element.find_elements(By.TAG_NAME, 'tr')
#                 if first_row:
#                     th_elements = first_row[0].find_elements(By.TAG_NAME, 'th')
#                     if th_elements:
#                         headers = [th.text for th in th_elements]
            
#             # 获取数据行
#             rows = []
#             tbody = table_element.find_elements(By.TAG_NAME, 'tbody')
#             if tbody:
#                 tr_elements = tbody[0].find_elements(By.TAG_NAME, 'tr')
#             else:
#                 tr_elements = table_element.find_elements(By.TAG_NAME, 'tr')
            
#             for tr in tr_elements:
#                 cells = tr.find_elements(By.TAG_NAME, 'td')
#                 if not cells:  # 跳过只有th的行
#                     continue
#                 row_data = [cell.text for cell in cells]
#                 if row_data:
#                     rows.append(row_data)
            
#             return {
#                 "title": title,
#                 "headers": headers,
#                 "data": rows
#             }
#         except Exception as e:
#             print(f"表格解析失败: {e}")
#             return {}
    
#     def _extract_tables_from_html(self, html: str) -> List[Dict]:
#         """从HTML中提取表格数据"""
#         soup = BeautifulSoup(html, 'html.parser')
#         tables = []
        
#         for table in soup.find_all('table'):
#             table_data = {
#                 "title": "",
#                 "headers": [],
#                 "data": []
#             }
            
#             # 获取表格标题
#             caption = table.find('caption')
#             if caption:
#                 table_data["title"] = caption.text.strip()
            
#             # 获取表头
#             thead = table.find('thead')
#             if thead:
#                 headers = [th.text.strip() for th in thead.find_all('th')]
#                 table_data["headers"] = headers
#             else:
#                 # 尝试从第一行获取表头
#                 first_row = table.find('tr')
#                 if first_row:
#                     headers = [th.text.strip() for th in first_row.find_all('th')]
#                     if headers:
#                         table_data["headers"] = headers
            
#             # 获取数据
#             tbody = table.find('tbody')
#             rows = tbody.find_all('tr') if tbody else table.find_all('tr')
            
#             for row in rows:
#                 cells = row.find_all('td')
#                 if cells:
#                     row_data = [cell.text.strip() for cell in cells]
#                     table_data["data"].append(row_data)
            
#             if table_data["data"]:
#                 tables.append(table_data)
        
#         return tables
    
#     def _extract_html_content(self, html: str, url: str) -> str:
#         """从HTML中提取正文内容"""
#         # 方法1：使用trafilatura
#         try:
#             content = trafilatura.extract(html, include_comments=False, 
#                                         include_tables=True, no_fallback=False,
#                                         url=url)
#             if content and len(content) > 100:
#                 return content
#         except:
#             pass
        
#         # 方法2：使用readability
#         try:
#             doc = readability(html, url)
#             summary = doc.summary()
#             soup = BeautifulSoup(summary, 'html.parser')
#             content = soup.get_text(strip=True)
#             if content and len(content) > 100:
#                 return self._clean_text(content)
#         except:
#             pass
        
#         # 方法3：基础BeautifulSoup提取
#         return self._basic_html_extract(html)
    
#     def _basic_html_extract(self, html: str) -> str:
#         """基础HTML内容提取方法"""
#         soup = BeautifulSoup(html, 'html.parser')
        
#         # 移除脚本和样式
#         for script in soup(["script", "style", "noscript"]):
#             script.decompose()
        
#         # 移除广告相关标签
#         ad_keywords = ['ad', 'ads', 'advertisement', 'sponsor', 'promo', 'banner']
#         for keyword in ad_keywords:
#             for tag in soup.find_all(class_=re.compile(keyword, re.I)):
#                 tag.decompose()
#             for tag in soup.find_all(id=re.compile(keyword, re.I)):
#                 tag.decompose()
        
#         # 提取主要内容区域
#         main_content = None
#         for tag in ['main', 'article', 'section']:
#             main_content = soup.find(tag)
#             if main_content:
#                 break
        
#         if main_content:
#             text = main_content.get_text(strip=True)
#         else:
#             text = soup.get_text(strip=True)
        
#         return self._clean_text(text)
    
#     def _extract_pdf_content(self, pdf_bytes: bytes) -> str:
#         """从PDF文件中提取文本内容"""
#         try:
#             pdf_file = BytesIO(pdf_bytes)
#             pdf_reader = PyPDF2.PdfReader(pdf_file)
#             text = ""
            
#             for page_num in range(len(pdf_reader.pages)):
#                 page = pdf_reader.pages[page_num]
#                 text += page.extract_text() + "\n"
            
#             return self._clean_text(text)
#         except Exception as e:
#             print(f"PDF提取失败: {e}")
#             return ""
    
#     def _extract_docx_content(self, docx_bytes: bytes) -> str:
#         """从Word文档中提取文本内容"""
#         try:
#             doc_file = BytesIO(docx_bytes)
#             doc = docx.Document(doc_file)
#             text = ""
            
#             for paragraph in doc.paragraphs:
#                 text += paragraph.text + "\n"
            
#             return self._clean_text(text)
#         except Exception as e:
#             print(f"DOCX提取失败: {e}")
#             return ""
    
#     def _extract_text_content(self, content_bytes: bytes) -> str:
#         """提取纯文本内容"""
#         try:
#             detected = chardet.detect(content_bytes)
#             encoding = detected['encoding'] or 'utf-8'
#             text = content_bytes.decode(encoding)
#             return self._clean_text(text)
#         except Exception as e:
#             print(f"文本提取失败: {e}")
#             return ""
    
#     def _clean_text(self, text: str) -> str:
#         """清理文本内容"""
#         text = re.sub(r'\s+', ' ', text)
#         text = re.sub(r'\n\s*\n', '\n\n', text)
#         text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]', '', text)
#         text = re.sub(r'\n{3,}', '\n\n', text)
#         return text.strip()
    
#     def close(self):
#         """关闭Selenium driver"""
#         if self.driver:
#             self.driver.quit()
#             self.driver = None

# # 使用示例
# if __name__ == "__main__":
#     # # 创建支持动态内容的爬虫
#     # scraper = WebContentScraper(use_selenium=True)
    
#     # # 爬取世界银行数据
#     # url = "https://data.worldbank.org.cn/indicator/NY.GDP.MKTP.KD.ZG?locations=CU"
#     # result = scraper.scrape(url, extract_tables=True)
    
#     # print("正文内容:")
#     # print(result["content"][:500] + "..." if result["content"] else "无内容")
    
#     # print("\n表格数据:")
#     # for i, table in enumerate(result["tables"]):
#     #     print(f"\n表格 {i+1}:")
#     #     print(f"标题: {table.get('title', '无标题')}")
#     #     print(f"表头: {table.get('headers', [])}")
#     #     print(f"数据行数: {len(table.get('data', []))}")
#     #     if table.get('data'):
#     #         print("前5行数据:")
#     #         for row in table['data'][:5]:
#     #             print(row)
    
#     # # 关闭浏览器
#     # scraper.close()
    
#     # 普通网页爬取示例
#     scraper2 = WebContentScraper()  # 不使用Selenium
#     # url2 = r"https://zh.wikipedia.org/zh-cn/%E5%A4%9A%E6%98%8E%E5%B0%BC%E5%8A%A0"
#     # url2 = r" http://baike.baidu.com/item/%E4%BA%9A%E9%A9%AC%E5%AD%99%E4%B8%9B%E6%9E%97/60662589?noadapt=1"
#     url2 = "http://www.360doc.com/content/22/1130/04/39305010_1058178720.shtml"
#     result2 = scraper2.scrape(url2)
#     # print(f"\n普通网页内容: {result2['content'][:200]}...")
#     print(f"\n普通网页内容: {result2['content']}")


"""
下面的代码可以
1.自动检测反爬虫网站：通过预定义的域名列表识别常见的反爬虫网站

2.智能选择爬取方法：
对于反爬虫网站：自动使用Selenium并应用反检测技术
对于动态内容网站：使用Selenium等待加载
对于静态网站：使用传统的requests方法

3.反爬虫绕过技术：
使用完整的浏览器请求头
隐藏自动化特征
设置合适的Referer和Cookie
先访问主页建立会话
模拟真实用户行为（滚动等）

4.特定网站优化：
为百度、360doc等网站设置专门的请求头
使用CDP命令隐藏webdriver特征
"""
import requests
from bs4 import BeautifulSoup
import PyPDF2
import docx
import re
from urllib.parse import urlparse
import os
from io import BytesIO
import html2text
import chardet
from readability import readability
import justext
import trafilatura
from typing import Optional, Union, Dict, List
import json
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.common.exceptions import TimeoutException, NoSuchElementException
import pandas as pd
from urllib.parse import urlparse

class WebContentScraper:
    """
    Intelligent web-content scraping tool.

    Decides automatically whether a page needs dynamic (JavaScript)
    rendering, extracts the main text from HTML pages, PDF and Word
    documents, and has special handling for dynamically loaded content
    and tabular data.
    """

    def __init__(self, auto_detect=True):
        """
        Initialize the scraper.

        Args:
            auto_detect: when True, automatically detect pages that need
                dynamic (Selenium-driven) loading; otherwise plain HTTP
                requests are used unless the caller forces dynamic mode.
        """
        # Full browser-like request headers to reduce the chance of being
        # rejected by naive bot detection.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        }
        self.timeout = 30  # per-request timeout in seconds
        self.auto_detect = auto_detect
        self.driver = None  # Selenium WebDriver, created lazily by _init_selenium()
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        
        # Domains known to require dynamic (JavaScript) loading.
        self.dynamic_domains = [
            'worldbank.org',
            'data.worldbank',
            'bloomberg.com',
            'finance.yahoo.com',
            'tradingview.com',
            'investing.com',
            'marketwatch.com',
            'twitter.com',
            'x.com',
            'facebook.com',
            'instagram.com',
            'linkedin.com',
            'youtube.com',
            'tiktok.com',
            'airbnb.com',
            'maps.google.com',
            'google.com/maps',
            'amazon.com',
            'ebay.com',
            'netflix.com',
            'spotify.com'
        ]
        
        # Sites known to employ anti-scraping countermeasures; these are
        # routed through the Selenium bypass path in scrape().
        self.anti_scraping_domains = [
            'baidu.com',
            'baike.baidu.com',
            '360doc.com',
            'zhihu.com',
            'weibo.com',
            'taobao.com',
            'tmall.com',
            'jd.com',
            'douban.com',
            'sohu.com',
            'sina.com',
            'toutiao.com',
            'csdn.net',
            'jianshu.com'
        ]
        
        # Markup fingerprints that indicate dynamically rendered
        # (SPA / JS-framework) content.
        self.dynamic_indicators = [
            'react-root',
            'vue-app',
            'angular',
            'ember-application',
            '__NEXT_DATA__',
            '__NUXT__',
            '__INITIAL_STATE__',
            'window.PRELOADED_STATE',
            'data-react',
            'data-vue',
            'ng-app',
            'backbone',
            'knockout',
            '__APOLLO_STATE__',
            'Blazor',
            '{"props":{"pageProps"'
        ]
        
        # Domains known to serve mostly static content.
        self.static_indicators = [
            'wikipedia.org',
            'github.com',
            'stackoverflow.com',
            'medium.com',
            'wordpress.com',
            'blogspot.com',
            '.edu',
            '.gov',
            'news.ycombinator.com',
            'reddit.com',
            'arxiv.org'
        ]
        
    def _needs_dynamic_loading(self, url: str, html: str = None) -> bool:
        """
        Heuristically decide whether a page must be rendered with Selenium.

        Args:
            url: target URL.
            html: raw HTML of the page, when already fetched (optional).

        Returns:
            True when the page appears to require JavaScript rendering.
        """
        parts = urlparse(url)
        host = parts.netloc.lower()

        # 1. Known-dynamic domains win immediately.
        matched = next((d for d in self.dynamic_domains if d in host), None)
        if matched is not None:
            print(f"检测到动态网站域名: {matched}")
            return True

        # 2. Known-static domains short-circuit to the requests path.
        matched = next((d for d in self.static_indicators if d in host), None)
        if matched is not None:
            print(f"检测到静态网站域名: {matched}")
            return False

        # 3. API endpoints usually return JSON and need no rendering.
        lowered_path = parts.path.lower()
        if '/api/' in lowered_path or lowered_path.endswith('.json'):
            return False

        # 4. Data-visualization pages often carry these query parameters.
        lowered_query = parts.query.lower()
        if any(token in lowered_query for token in ['chart', 'graph', 'data', 'visualization']):
            return True

        # 5. With the HTML in hand, inspect the document itself.
        if html:
            size = len(html)

            # A tiny document is usually just an SPA shell.
            if size < 5000:
                print(f"页面大小较小 ({size} bytes)，可能是SPA")
                return True

            # Framework fingerprints embedded in the markup.
            for marker in self.dynamic_indicators:
                if marker in html:
                    print(f"检测到动态内容标记: {marker}")
                    return True

            soup = BeautifulSoup(html, 'html.parser')

            # Count <script> tags and measure visible text.
            script_count = len(soup.find_all('script'))
            visible_text = soup.get_text(strip=True)
            text_length = len(visible_text)

            # Scripts per kilobyte of markup.
            script_density = script_count / (size / 1000) if size > 0 else 0
            # Fraction of the markup that is actual text.
            content_density = text_length / size if size > 0 else 0

            # Containers whose class names suggest data/chart widgets.
            data_containers = soup.find_all(
                ['div', 'section'],
                class_=re.compile(r'data|chart|graph|table|grid', re.I))

            # Decision rules.
            if script_density > 5 and content_density < 0.1:
                print(f"高脚本密度 ({script_density:.2f}) + 低内容密度 ({content_density:.2f})")
                return True

            if script_count > 20 and text_length < 500:
                print(f"大量脚本 ({script_count}) + 少量文本 ({text_length})")
                return True

            if len(data_containers) > 0 and script_count > 10:
                print(f"发现数据容器 + 脚本数量: {script_count}")
                return True

            # Lazy-loading hints: content appears only after scripts run.
            for marker in ['loading="lazy"', 'data-src', 'data-lazy', 'skeleton', 'placeholder']:
                if marker in html:
                    print(f"检测到延迟加载标记: {marker}")
                    return True

        # Default: plain requests should be sufficient.
        return False
    
    def scrape(self, url: str, extract_tables=True, wait_for_element=None, force_dynamic=None) -> Dict[str, Union[str, List[Dict]]]:
        """
        Main entry point: scrape the content behind a URL.

        Args:
            url: target URL
            extract_tables: whether to also extract table data
            wait_for_element: CSS selector of an element to wait for
                (only relevant on the dynamic/Selenium path)
            force_dynamic: force dynamic loading (True) or static loading
                (False); None means auto-detect

        Returns:
            Dict with "content" (main text), "tables" (list of table dicts)
            and "method" (extraction path used); on failure an "error" key
            is added and "content" stays empty.
        """
        result = {"content": "", "tables": [], "method": "unknown"}
        
        # Known anti-scraping sites skip straight to the Selenium bypass.
        if self._is_anti_scraping_site(url):
            print(f"检测到反爬虫网站，使用Selenium模式")
            return self._extract_with_anti_scraping_bypass(url, extract_tables)
        
        try:
            # Step 1: plain HTTP request for the initial content.
            print(f"正在爬取: {url}")
            
            # Some hosts require special headers (Referer, cookies, ...).
            headers = self._get_site_specific_headers(url)
            
            response = self.session.get(url, headers=headers, timeout=self.timeout)
            response.raise_for_status()
            
            # Dispatch on the Content-Type of the response.
            content_type = response.headers.get('Content-Type', '').lower()
            
            # Non-HTML payloads go to format-specific extractors.
            if 'application/pdf' in content_type:
                result["content"] = self._extract_pdf_content(response.content)
                result["method"] = "pdf"
                return result
            elif 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' in content_type:
                result["content"] = self._extract_docx_content(response.content)
                result["method"] = "docx"
                return result
            elif 'text/plain' in content_type:
                result["content"] = self._extract_text_content(response.content)
                result["method"] = "text"
                return result
            
            # HTML: decide whether the page needs JavaScript rendering.
            if force_dynamic is None and self.auto_detect:
                needs_dynamic = self._needs_dynamic_loading(url, response.text)
                print(f"自动检测结果: {'需要动态加载' if needs_dynamic else '使用静态方法'}")
            else:
                needs_dynamic = force_dynamic if force_dynamic is not None else False
                print(f"强制模式: {'动态加载' if needs_dynamic else '静态方法'}")
            
            if needs_dynamic:
                # Render the page with Selenium, then extract.
                result = self._extract_dynamic_content(url, extract_tables, wait_for_element)
                result["method"] = "selenium"
            else:
                # Static path: extract directly from the fetched HTML.
                result["content"] = self._extract_html_content(response.text, url)
                if extract_tables:
                    result["tables"] = self._extract_tables_from_html(response.text)
                result["method"] = "static"
                
        except Exception as e:
            print(f"爬取失败: {e}")
            result["error"] = str(e)
            
        return result
    
    def _init_selenium(self):
        """Lazily create and configure the Selenium Chrome WebDriver.

        No-op when a driver already exists. The driver is configured to
        hide common automation fingerprints before any page script runs.
        """
        if not self.driver:
            options = Options()
            # Hide automation switches from basic bot detection.
            options.add_experimental_option("excludeSwitches", ["enable-automation"])
            options.add_experimental_option('useAutomationExtension', False)
            options.add_argument('--disable-blink-features=AutomationControlled')
            
            # Reuse the same User-Agent as the requests session.
            options.add_argument(f'user-agent={self.headers["User-Agent"]}')
            
            # General headless/hardening configuration.
            options.add_argument('--headless')  # run without a visible window
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--disable-gpu')
            options.add_argument('--ignore-certificate-errors')
            options.add_argument('--ignore-ssl-errors')
            options.add_argument('--log-level=3')
            options.add_argument('--disable-extensions')
            options.add_argument('--disable-infobars')
            
            # Browser language.
            options.add_argument('--lang=zh-CN')
            
            # Fixed window size so layouts render consistently.
            options.add_argument('--window-size=1920,1080')
            
            # Silence chromedriver logging (platform-specific null device).
            service = Service(log_path='NUL' if os.name == 'nt' else '/dev/null')
            self.driver = webdriver.Chrome(options=options, service=service)
            self.driver.set_page_load_timeout(30)
            
            # Inject anti-detection JavaScript that runs on every new
            # document before the page's own scripts.
            self.driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
                'source': '''
                    Object.defineProperty(navigator, 'webdriver', {
                        get: () => false
                    });
                    window.chrome = {
                        runtime: {}
                    };
                    Object.defineProperty(navigator, 'plugins', {
                        get: () => [1, 2, 3, 4, 5]
                    });
                    Object.defineProperty(navigator, 'languages', {
                        get: () => ['zh-CN', 'zh', 'en']
                    });
                '''
            })
    
    def _extract_dynamic_content(self, url: str, extract_tables=True, wait_for_element=None) -> Dict:
        """
        Render the page with Selenium and extract its text (and tables).

        Args:
            url: target URL
            extract_tables: whether to also harvest <table> data
            wait_for_element: CSS selector to wait for before extracting

        Returns:
            Dict with "content" and "tables" keys (both empty on failure).
        """
        self._init_selenium()
        result = {"content": "", "tables": []}
        
        try:
            print("使用Selenium加载页面...")
            self.driver.get(url)
            
            # Wait for the page to load.
            if wait_for_element:
                print(f"等待元素: {wait_for_element}")
                WebDriverWait(self.driver, 20).until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, wait_for_element))
                )
            else:
                # No explicit selector: pick a wait strategy from the URL.
                if 'worldbank' in url:
                    try:
                        print("等待世界银行数据加载...")
                        WebDriverWait(self.driver, 20).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, 'table, .chart-container, .data-table, svg, .series-list'))
                        )
                    except:
                        pass
                elif any(indicator in url for indicator in ['chart', 'graph', 'data', 'visualization']):
                    try:
                        print("等待图表数据加载...")
                        WebDriverWait(self.driver, 15).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, 'canvas, svg, .chart, .graph'))
                        )
                    except:
                        pass
                else:
                    # Generic wait for a main-content container.
                    try:
                        WebDriverWait(self.driver, 10).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, 'main, article, #content, .content'))
                        )
                    except:
                        pass
                
                # Extra settle time for late XHR/render work.
                time.sleep(3)
            
            # Scroll to trigger lazy-loaded content, then return to the top.
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(1)
            self.driver.execute_script("window.scrollTo(0, 0);")
            
            # Fully rendered DOM.
            page_source = self.driver.page_source
            
            # Extract the main text.
            result["content"] = self._extract_html_content(page_source, url)
            
            # Extract table data.
            if extract_tables:
                result["tables"] = self._extract_tables_from_selenium()
                
                # Extra harvesting for data-heavy sites.
                if any(domain in url for domain in ['worldbank', 'data.', 'statistics']):
                    special_data = self._extract_special_data()
                    if special_data:
                        result["tables"].append(special_data)
                        
        except TimeoutException:
            print("页面加载超时")
        except Exception as e:
            print(f"动态内容提取失败: {e}")
            
        return result
    
    def _extract_special_data(self) -> Optional[Dict]:
        """
        Extract structured data from data-heavy sites (e.g. World Bank).

        Tries, in order: visible data tables, JavaScript state objects
        embedded in the page, and chart tooltip/canvas data.

        Returns:
            A dict describing the data found, or None when nothing matched.
        """
        try:
            # Give the page a moment to finish loading its data.
            time.sleep(2)
            
            # Several extraction strategies are attempted in sequence.
            
            # Strategy 1: look for data tables of various flavours.
            table_selectors = [
                'table',
                '.table',
                '[role="table"]',
                '.data-table',
                '.series-list',
                '.indicator-data',
                '.time-series-data'
            ]
            
            for selector in table_selectors:
                tables = self.driver.find_elements(By.CSS_SELECTOR, selector)
                for table in tables:
                    # Only visible tables with some text are worth parsing.
                    if table.is_displayed() and table.text.strip():
                        parsed_table = self._parse_table_element(table)
                        if parsed_table and parsed_table.get('data'):
                            return parsed_table
            
            # Strategy 2: pull state objects that JS frameworks leave on window.
            js_data = self.driver.execute_script("""
                // 检查各种可能的数据存储位置
                const dataLocations = [
                    window.__REACT_DATA__,
                    window.__INITIAL_STATE__,
                    window.__NEXT_DATA__,
                    window.APP_DATA,
                    window.pageData,
                    window.dataLayer,
                    window.chartData,
                    window.seriesData
                ];
                
                for (const data of dataLocations) {
                    if (data && typeof data === 'object') {
                        return data;
                    }
                }
                
                // 查找包含数据的元素
                const dataElements = document.querySelectorAll('[data-year], [data-value], [data-point], .data-point, .chart-data');
                if (dataElements.length > 0) {
                    const extractedData = [];
                    dataElements.forEach(el => {
                        const text = el.textContent || el.innerText;
                        const year = el.getAttribute('data-year') || text.match(/\d{4}/)?.[0];
                        const value = el.getAttribute('data-value') || text.match(/[\d.-]+/)?.[0];
                        if (year || value) {
                            extractedData.push({
                                year: year,
                                value: value,
                                text: text
                            });
                        }
                    });
                    return {type: 'extracted', data: extractedData};
                }
                
                return null;
            """)
            
            if js_data:
                return {"type": "js_data", "data": js_data}
            
            # Strategy 3: look for charts and scrape their tooltip/canvas data.
            chart_data = self.driver.execute_script("""
                // 查找图表容器
                const chartContainers = document.querySelectorAll('.chart-container, .graph-container, svg.chart, canvas');
                for (const container of chartContainers) {
                    // 尝试从标题属性提取数据
                    const dataPoints = container.querySelectorAll('[title], [data-original-title], [aria-label]');
                    if (dataPoints.length > 0) {
                        const data = [];
                        dataPoints.forEach(point => {
                            const info = point.getAttribute('title') || 
                                       point.getAttribute('data-original-title') || 
                                       point.getAttribute('aria-label');
                            if (info) {
                                data.push(info);
                            }
                        });
                        if (data.length > 0) {
                            return {type: 'chart_tooltips', data: data};
                        }
                    }
                }
                
                // 检查Canvas图表的数据
                const canvases = document.querySelectorAll('canvas');
                for (const canvas of canvases) {
                    const ctx = canvas.getContext('2d');
                    if (ctx && ctx.canvas && ctx.canvas.__chart) {
                        return {type: 'chart_js', data: ctx.canvas.__chart.data};
                    }
                }
                
                return null;
            """)
            
            if chart_data:
                return chart_data
                
        except Exception as e:
            print(f"特殊数据提取失败: {e}")
            
        return None
    
    def _extract_tables_from_selenium(self) -> List[Dict]:
        """Collect every visible, non-empty <table> from the rendered page."""
        collected = []
        try:
            for element in self.driver.find_elements(By.TAG_NAME, 'table'):
                # Skip hidden tables.
                if not element.is_displayed():
                    continue
                parsed = self._parse_table_element(element)
                if parsed and parsed.get('data'):
                    collected.append(parsed)
        except Exception as e:
            print(f"表格提取失败: {e}")
        return collected
    
    def _parse_table_element(self, table_element) -> Dict:
        """Convert a Selenium <table> element into a title/headers/data dict.

        Returns {} when the element cannot be parsed.
        """
        try:
            # A <caption>, when present, becomes the table title.
            captions = table_element.find_elements(By.TAG_NAME, 'caption')
            title = captions[0].text if captions else ""
            
            # Prefer an explicit <thead>; otherwise look for <th> cells in
            # the first row.
            headers = []
            heads = table_element.find_elements(By.TAG_NAME, 'thead')
            if heads:
                headers = [cell.text.strip()
                           for cell in heads[0].find_elements(By.TAG_NAME, 'th')]
            else:
                all_rows = table_element.find_elements(By.TAG_NAME, 'tr')
                if all_rows:
                    header_cells = all_rows[0].find_elements(By.TAG_NAME, 'th')
                    if header_cells:
                        headers = [cell.text.strip() for cell in header_cells]
            
            # Body rows: prefer <tbody>, else every <tr> in the table.
            bodies = table_element.find_elements(By.TAG_NAME, 'tbody')
            if bodies:
                row_elements = bodies[0].find_elements(By.TAG_NAME, 'tr')
            else:
                row_elements = table_element.find_elements(By.TAG_NAME, 'tr')
            
            data_rows = []
            for row in row_elements:
                cells = row.find_elements(By.TAG_NAME, 'td')
                if not cells:  # header-only rows carry no <td>
                    continue
                values = [cell.text.strip() for cell in cells]
                if any(values):  # keep rows with at least one non-empty cell
                    data_rows.append(values)
            
            return {
                "title": title,
                "headers": headers,
                "data": data_rows
            }
        except Exception as e:
            print(f"表格解析失败: {e}")
            return {}
    
    # ... [remaining methods unchanged] ...
    
    def _extract_tables_from_html(self, html: str) -> List[Dict]:
        """Parse every data-bearing <table> in raw HTML into
        title/headers/data dicts (tables without data rows are dropped)."""
        parsed_tables = []
        
        for table in BeautifulSoup(html, 'html.parser').find_all('table'):
            entry = {
                "title": "",
                "headers": [],
                "data": []
            }
            
            # A <caption> becomes the table title.
            caption = table.find('caption')
            if caption:
                entry["title"] = caption.text.strip()
            
            # Header cells: explicit <thead> first, else <th> in row one.
            thead = table.find('thead')
            if thead:
                entry["headers"] = [th.text.strip() for th in thead.find_all('th')]
            else:
                first_row = table.find('tr')
                if first_row:
                    header_cells = [th.text.strip() for th in first_row.find_all('th')]
                    if header_cells:
                        entry["headers"] = header_cells
            
            # Data rows: prefer <tbody>, else every row in the table.
            tbody = table.find('tbody')
            row_elements = tbody.find_all('tr') if tbody else table.find_all('tr')
            
            for row in row_elements:
                cells = row.find_all('td')
                if cells:
                    entry["data"].append([cell.text.strip() for cell in cells])
            
            # Only keep tables that actually contain data rows.
            if entry["data"]:
                parsed_tables.append(entry)
        
        return parsed_tables
    
    def _extract_html_content(self, html: str, url: str) -> str:
        """
        Extract the main article text from an HTML document.

        Tries three strategies in order of quality: trafilatura,
        readability-lxml, and a basic BeautifulSoup fallback.

        Args:
            html: raw HTML source.
            url: source URL (helps the extractors resolve relative content).

        Returns:
            Extracted plain text; falls back to the basic extractor's
            output ("" only if everything fails).
        """
        # Strategy 1: trafilatura, usually the best main-content extractor.
        try:
            content = trafilatura.extract(html, include_comments=False, 
                                        include_tables=True, no_fallback=False,
                                        url=url)
            if content and len(content) > 100:
                return content
        except Exception:
            pass
        
        # Strategy 2: readability-lxml.
        # BUG FIX: the module-level `from readability import readability`
        # binds the *submodule*, which is not callable, so the original
        # `readability(html, url)` always raised TypeError and this branch
        # was silently dead.  The library's public API is the Document class.
        try:
            from readability import Document  # correct readability-lxml API
            summary = Document(html, url=url).summary()
            soup = BeautifulSoup(summary, 'html.parser')
            content = soup.get_text(strip=True)
            if content and len(content) > 100:
                return self._clean_text(content)
        except Exception:
            pass
        
        # Strategy 3: plain BeautifulSoup heuristics.
        return self._basic_html_extract(html)
    
    def _basic_html_extract(self, html: str) -> str:
        """
        Fallback extractor: strip boilerplate and return the page text.

        Removes scripts, styles and likely ad containers, then prefers a
        semantic main-content element over the whole document.

        Args:
            html: raw HTML source.

        Returns:
            Cleaned plain text of the page.
        """
        soup = BeautifulSoup(html, 'html.parser')
        
        # Drop non-content tags outright.
        for script in soup(["script", "style", "noscript"]):
            script.decompose()
        
        # Remove obvious advertising containers by class/id keyword.
        ad_keywords = ['ad', 'ads', 'advertisement', 'sponsor', 'promo', 'banner']
        for keyword in ad_keywords:
            for tag in soup.find_all(class_=re.compile(keyword, re.I)):
                tag.decompose()
            for tag in soup.find_all(id=re.compile(keyword, re.I)):
                tag.decompose()
        
        # Prefer a semantic main-content container when one exists.
        main_content = None
        for tag in ['main', 'article', 'section']:
            main_content = soup.find(tag)
            if main_content:
                break
        
        # FIX: get_text(strip=True) joins text nodes with no separator,
        # gluing words from adjacent tags together; use a space separator
        # (the extra whitespace is collapsed by _clean_text anyway).
        if main_content:
            text = main_content.get_text(separator=' ', strip=True)
        else:
            text = soup.get_text(separator=' ', strip=True)
        
        return self._clean_text(text)
    
    def _extract_pdf_content(self, pdf_bytes: bytes) -> str:
        """
        Extract the text of every page in a PDF.

        Args:
            pdf_bytes: raw PDF file bytes.

        Returns:
            Cleaned text of all pages, or "" when the PDF cannot be read.
        """
        try:
            pdf_reader = PyPDF2.PdfReader(BytesIO(pdf_bytes))
            pages = []
            # Iterate pages directly instead of indexing by page number.
            for page in pdf_reader.pages:
                # extract_text() can yield None/"" for image-only pages;
                # guard so one bad page doesn't abort the whole document.
                pages.append(page.extract_text() or "")
            return self._clean_text("\n".join(pages))
        except Exception as e:
            print(f"PDF提取失败: {e}")
            return ""
    
    def _extract_docx_content(self, docx_bytes: bytes) -> str:
        """Pull the paragraph text out of a Word (.docx) document.

        Returns "" when the document cannot be parsed.
        """
        try:
            document = docx.Document(BytesIO(docx_bytes))
            # One line per paragraph; _clean_text normalizes the result.
            paragraphs = [paragraph.text for paragraph in document.paragraphs]
            return self._clean_text("\n".join(paragraphs))
        except Exception as e:
            print(f"DOCX提取失败: {e}")
            return ""
    
    def _extract_text_content(self, content_bytes: bytes) -> str:
        """
        Decode raw bytes into cleaned plain text.

        Uses chardet to guess the encoding, falling back to UTF-8 when
        detection is inconclusive.

        Args:
            content_bytes: raw response body.

        Returns:
            Cleaned text, or "" when decoding fails entirely.
        """
        try:
            detected = chardet.detect(content_bytes)
            encoding = detected['encoding'] or 'utf-8'
            # FIX: errors='replace' keeps the document readable when chardet
            # guesses wrong; previously a single undecodable byte raised and
            # threw the whole page away.
            text = content_bytes.decode(encoding, errors='replace')
            return self._clean_text(text)
        except Exception as e:
            print(f"文本提取失败: {e}")
            return ""
    
    def _clean_text(self, text: str) -> str:
        r"""
        Normalize extracted text: strip control characters, collapse runs
        of horizontal whitespace, and squeeze blank-line runs down to one.

        BUG FIX: the original applied r'\s+' -> ' ' first, which replaced
        *all* whitespace (newlines included), so the two newline-normalizing
        substitutions that followed could never match.  Horizontal whitespace
        is now collapsed separately so paragraph breaks survive.

        Args:
            text: raw extracted text.

        Returns:
            Cleaned, stripped text.
        """
        # Remove control characters (tab, LF and CR are kept for now).
        text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]', '', text)
        # Normalize Windows/old-Mac line endings to LF.
        text = re.sub(r'\r\n?', '\n', text)
        # Collapse runs of spaces/tabs only — newlines survive.
        text = re.sub(r'[ \t]+', ' ', text)
        # Trim the single remaining space around each line break.
        text = re.sub(r' ?\n ?', '\n', text)
        # Squeeze three or more consecutive newlines down to a blank line.
        text = re.sub(r'\n{3,}', '\n\n', text)
        return text.strip()
    
    def close(self):
        """Shut down the Selenium driver, if one was ever started."""
        active = self.driver
        if active:
            active.quit()
            self.driver = None
    
    def _is_anti_scraping_site(self, url: str) -> bool:
        """Return True when the URL's host matches a known anti-scraping domain."""
        host = urlparse(url).netloc.lower()
        return any(marker in host for marker in self.anti_scraping_domains)
    
    def _get_site_specific_headers(self, url: str) -> dict:
        """Build per-site request headers: the base headers plus the extra
        Referer/Cookie/Sec-* fields some hosts expect."""
        merged = dict(self.headers)
        host = urlparse(url).netloc.lower()
        
        if 'baidu.com' in host:
            # Baidu properties expect a Baidu referer and session cookie.
            merged.update({
                'Referer': 'https://www.baidu.com/',
                'Cookie': 'BAIDUID=FAKE_BAIDUID:FG=1',
                'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
                'Sec-Ch-Ua-Mobile': '?0',
                'Sec-Ch-Ua-Platform': '"Windows"',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'none',
                'Sec-Fetch-User': '?1'
            })
        elif '360doc.com' in host:
            # 360doc expects a same-site referer and a session cookie.
            merged.update({
                'Referer': 'http://www.360doc.com/',
                'Cookie': 'PHPSESSID=fake_session_id',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'same-origin'
            })
        
        return merged
    
    def _extract_with_anti_scraping_bypass(self, url: str, extract_tables: bool = True) -> Dict:
        """
        Extract content from a known anti-scraping site via Selenium.

        Establishes a session by visiting the site's homepage first, then
        navigates to the target page, scrolls to force full rendering, and
        extracts text (and optionally tables) from the rendered DOM.

        Args:
            url: target URL
            extract_tables: whether to also harvest <table> data

        Returns:
            Dict with "content", "tables" and "method" keys; best-effort
            body text is returned even when extraction partially fails.
        """
        self._init_selenium()
        result = {"content": "", "tables": [], "method": "selenium_anti_scraping"}
        
        try:
            # Visit the homepage first so the site sets session cookies.
            parsed_url = urlparse(url)
            base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
            print(f"访问主页建立会话: {base_url}")
            self.driver.get(base_url)
            time.sleep(2)
            
            # Now navigate to the actual target page.
            print(f"访问目标页面: {url}")
            self.driver.get(url)
            
            # Wait until at least the <body> is present.
            WebDriverWait(self.driver, 20).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            time.sleep(3)
            
            # Check whether we were redirected or served an error page.
            current_url = self.driver.current_url
            page_title = self.driver.title
            
            if '403' in page_title or 'forbidden' in page_title.lower():
                print("仍然被拒绝访问，尝试其他方法...")
                # One retry: reload via JavaScript.
                self.driver.execute_script("window.location.reload();")
                time.sleep(3)
            
            # Scroll in stages so lazy content gets a chance to load.
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
            time.sleep(1)
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(1)
            
            # Fully rendered DOM.
            page_source = self.driver.page_source
            
            # Extract the main text.
            result["content"] = self._extract_html_content(page_source, url)
            
            # Extract table data.
            if extract_tables:
                result["tables"] = self._extract_tables_from_selenium()
                
        except TimeoutException:
            print("等待页面加载超时")
        except Exception as e:
            print(f"反爬虫绕过失败: {e}")
            # Last resort: grab the raw body text of whatever did render.
            try:
                result["content"] = self.driver.find_element(By.TAG_NAME, "body").text
            except:
                pass
        
        return result

# Usage example
if __name__ == "__main__":
    # Create a scraper with automatic dynamic-loading detection.
    scraper = WebContentScraper(auto_detect=True)
    
    # URLs of sites known to employ anti-scraping measures.
    test_anti_scraping_urls = [
        "http://baike.baidu.com/item/%E4%BA%9A%E9%A9%AC%E5%AD%99%E4%B8%9B%E6%9E%97/60662589?noadapt=1",
        "http://www.360doc.com/content/22/1130/04/39305010_1058178720.shtml"
    ]
    
    print("测试反爬虫网站：")
    for url in test_anti_scraping_urls:
        print(f"\n{'='*60}")
        result = scraper.scrape(url, extract_tables=False)
        
        print(f"URL: {url}")
        print(f"使用方法: {result.get('method', 'unknown')}")
        print(f"内容长度: {len(result.get('content', ''))}")
        
        if result.get('content'):
            # print(f"内容预览: {result['content'][:300]}...")
            print(f"{result}")
        else:
            print("无法获取内容")
    
    # # 测试其他网站
    # test_urls = [
    #     # 动态内容网站
    #     "https://data.worldbank.org.cn/indicator/NY.GDP.MKTP.KD.ZG?locations=CU",
        
    #     # 静态内容网站
    #     "https://www.runoob.com/html/html-tutorial.html",
    #     "https://en.wikipedia.org/wiki/Python_(programming_language)"
    # ]
    
    # print("\n\n测试其他网站：")
    # for url in test_urls:
    #     print(f"\n{'='*60}")
    #     result = scraper.scrape(url, extract_tables=True)
        
    #     print(f"URL: {url}")
    #     print(f"使用方法: {result.get('method', 'unknown')}")
    #     print(f"内容长度: {len(result.get('content', ''))}")
    #     print(f"表格数量: {len(result.get('tables', []))}")
        
    #     if result.get('content'):
    #         print(f"内容预览: {result['content'][:200]}...")
        
    #     if result.get('tables'):
    #         print(f"第一个表格: {result['tables'][0].get('headers', [])}")

    
    # Shut down the Selenium driver.
    scraper.close()
    
    # print("\n\n测试强制模式:")
    # # 强制使用动态模式
    # scraper2 = WebContentScraper()
    # result = scraper2.scrape("https://www.example.com", force_dynamic=True)
    # print(f"强制动态模式: {result.get('method')}")
    
    # # 强制使用静态模式
    # result = scraper2.scrape("https://data.worldbank.org", force_dynamic=False)
    # print(f"强制静态模式: {result.get('method')}")
    
    # scraper2.close()