import asyncio
from playwright.async_api import async_playwright
import pandas as pd
import re
from pathlib import Path
from config import SCROLL_CONFIG, FILE_PATHS, SELECTORS, REGEX_PATTERNS, BROWSER_CONFIG

# Target page: Mexican national catalogue of standards (catálogo mexicano de normas).
_CATALOG_URL = (
    "https://platiica.economia.gob.mx/normalizacion/"
    "catalogo-mexicano-de-normaswd_asp-id29/"
)


async def _scroll_until_stable(page):
    """Scroll to the page bottom until the item count stops growing.

    The catalogue uses infinite scroll, so keep scrolling until
    SCROLL_CONFIG['max_no_change'] consecutive passes discover no new
    item containers.

    Args:
        page: Playwright Page already navigated to the catalogue.

    Returns:
        int: the highest item count observed.
    """
    previous_count = 0
    no_change_count = 0
    max_no_change = SCROLL_CONFIG['max_no_change']

    while no_change_count < max_no_change:
        # Jump to the bottom to trigger the next lazy-load batch.
        await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
        await page.wait_for_timeout(SCROLL_CONFIG['wait_time'])

        items = await page.query_selector_all(SELECTORS['item_container'])
        current_count = len(items)
        print(f"当前找到 {current_count} 个数据项")

        if current_count > previous_count:
            previous_count = current_count
            no_change_count = 0  # reset the stall counter on progress
            print("发现新数据，继续滚动...")
        else:
            no_change_count += 1
            print(f"未发现新数据 ({no_change_count}/{max_no_change})")

        # Best-effort wait for in-flight AJAX; a timeout here is expected
        # and non-fatal. NOTE: was a bare `except:`, which in async code
        # also swallows asyncio.CancelledError and breaks cancellation.
        try:
            await page.wait_for_load_state(
                'networkidle', timeout=SCROLL_CONFIG['network_timeout']
            )
        except Exception:
            pass

    return previous_count


async def _extract_structured(page):
    """Extract standard records from the expected item/title/description markup.

    Args:
        page: Playwright Page with all items loaded.

    Returns:
        list[dict]: records with 'NOM Standard', 'Description' and 'URL'
        keys; empty if the page layout did not match the selectors.
    """
    records = []
    for item in await page.query_selector_all(SELECTORS['item_container']):
        try:
            title_link = await item.query_selector(SELECTORS['title_link'])
            if not title_link:
                continue

            link_text = await title_link.inner_text()
            # The configured pattern pulls the standard number out of the title.
            match = re.search(REGEX_PATTERNS['standard_extraction'], link_text.strip())
            if not match:
                continue
            nom_standard = match.group(1).strip()

            description = ""
            desc_element = await item.query_selector(SELECTORS['description'])
            if desc_element:
                description = (await desc_element.inner_text()).strip()

            href = await title_link.get_attribute('href')
            records.append({
                'NOM Standard': nom_standard,
                'Description': description,
                'URL': href,
            })
            print(f"找到标准: {nom_standard}")
        except Exception as e:
            # Skip a malformed item but keep harvesting the rest.
            print(f"处理项目时出错: {e}")
            continue
    return records


async def _extract_fallback(page):
    """Fallback extraction: scan every <a> on the page for NMX-/NOM- numbers.

    Used only when the structured selectors matched nothing.

    Args:
        page: Playwright Page with all items loaded.

    Returns:
        list[dict]: records in the same shape as _extract_structured.
    """
    records = []
    for link in await page.query_selector_all('a'):
        try:
            text = await link.inner_text()
            if 'NMX-' not in text and 'NOM-' not in text:
                continue
            match = re.search(REGEX_PATTERNS['fallback_standard'], text)
            if not match:
                continue
            href = await link.get_attribute('href')
            standard = match.group(1)
            records.append({
                'NOM Standard': standard,
                'Description': text.strip(),
                'URL': href,
            })
            print(f"找到标准: {standard}")
        except Exception:
            # Was a bare `except:` (also caught CancelledError); per-link
            # failures stay best-effort and are simply skipped.
            continue
    return records


def _save_to_excel(nom_standards):
    """Write the collected records to the configured Excel file and preview them.

    Args:
        nom_standards: non-empty list of record dicts.
    """
    df = pd.DataFrame(nom_standards)
    excel_file = FILE_PATHS['output_excel']
    df.to_excel(excel_file, index=False, engine='openpyxl')

    print(f"\n成功提取 {len(nom_standards)} 个NOM标准")
    print(f"数据已保存到: {excel_file}")
    print("\n前5条数据预览:")
    print(df.head().to_string(index=False))


async def scrape_nom_standards():
    """Scrape NOM/NMX standard entries from the catalogue page into Excel.

    Opens the catalogue with Playwright, exhausts its infinite scroll,
    extracts every standard number with description and link (falling back
    to a looser whole-page link scan when the expected markup is absent),
    and saves the result via pandas/openpyxl. All failures are reported on
    stdout; the browser is always closed.
    """
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=BROWSER_CONFIG['headless'])
        page = await browser.new_page()
        page.set_default_timeout(BROWSER_CONFIG['timeout'])

        try:
            await page.goto(_CATALOG_URL)
            await page.wait_for_load_state('networkidle')

            print("开始滚动页面加载所有数据...")
            total = await _scroll_until_stable(page)
            print(f"滚动完成，总共找到 {total} 个数据项")

            nom_standards = await _extract_structured(page)
            if not nom_standards:
                print("未找到标准格式的数据，尝试查找所有包含'NMX'或'NOM'的文本...")
                nom_standards = await _extract_fallback(page)

            if nom_standards:
                _save_to_excel(nom_standards)
            else:
                print("未找到任何NOM标准数据")

        except Exception as e:
            # Broad catch is deliberate for a CLI script: report and clean up.
            print(f"加载页面时出错: {e}")

        finally:
            await browser.close()

def main():
    """Entry point for the scraper script.

    Announces the start, drives the async scrape to completion on a fresh
    event loop via ``asyncio.run``, then announces completion.
    """
    print("开始提取NOM标准数据...")
    asyncio.run(scrape_nom_standards())
    print("提取完成！")


if __name__ == "__main__":
    main()