import asyncio
import csv
import json
from playwright.async_api import async_playwright
import time

class OptimizedPBIScraper:
    """Scrape page 4 of a virtualized PowerBI report table via Playwright.

    The PowerBI grid renders rows virtually (only the visible window exists
    in the DOM), so extraction works in two phases: first scroll to the
    bottom until the scroll height stops growing (forcing all data to load),
    then step down from the top roughly 10 rows at a time, harvesting the
    currently rendered rows after each step. Rows are deduplicated with a
    composite key of (Organization Name, Forest Area, Date From).
    """

    def __init__(self):
        # Accumulated rows, in extraction order.
        self.data = []
        # Dedup map: (org_name, forest_area, date_from) -> row_data.
        self.processed_rows = {}
        # Per the page-4 HTML structure there are 9 data columns.
        self.headers = [
            "Organization Name", "CB", "Contact Email", "Country/Area",
            "Evaluation Type", "Forest Type", "Forest Area (ha)", "Date From", "Date To"
        ]

    async def setup_browser(self, url: str):
        """Launch a headed Chromium instance and open *url*."""
        self.playwright = await async_playwright().start()
        # headless=False: the user must be able to log in / inspect manually.
        self.browser = await self.playwright.chromium.launch(headless=False)
        self.context = await self.browser.new_context()
        self.page = await self.context.new_page()

        await self.page.goto(url)
        await self.page.wait_for_load_state('networkidle')

    async def _scroll_table_to_top(self):
        """Scroll the grid viewport back to row 0.

        The script is wrapped in an IIFE: a bare top-level ``const`` persists
        in the page context across ``evaluate`` calls, so a second call with
        the same declaration would throw
        "Identifier 'viewport' has already been declared".
        """
        await self.page.evaluate('''
            (() => {
                // Try the known scroll-container candidates in order.
                const viewport = document.querySelector('.mid-viewport') ||
                                document.querySelector('.scrollable-cells-viewport') ||
                                document.querySelector('[role="grid"]').parentElement;
                if (viewport) {
                    viewport.scrollTop = 0;
                }
            })()
        ''')

    async def extract_table_data(self):
        """Navigate to page 4 of the report and extract every table row.

        Steps:
          1. Click "next page" three times to reach page 4.
          2. Preload: repeatedly scroll to the bottom until scrollHeight
             stops changing.
          3. Extract: return to the top, then step down ~10 rows at a time,
             calling :meth:`extract_visible_rows` after each step.
        """
        try:
            # Wait for the grid to render.
            await self.page.wait_for_selector('[role="grid"]', timeout=15000)
            print("表格已加载，开始导航到第四页...")

            # Navigate to page 4 with three successive "next page" clicks.
            print("\n=== 导航到第四页 ===")
            if not await self.try_next_page():
                print("无法找到第二页，可能只有一页数据")
                return
            print("已到达第二页，继续导航到第三页...")

            if not await self.try_next_page():
                print("无法找到第三页，可能只有两页数据")
                return
            print("已到达第三页，继续导航到第四页...")

            if not await self.try_next_page():
                print("无法找到第四页，可能只有三页数据")
                return

            print("已成功导航到第四页，开始提取数据")
            await asyncio.sleep(3)  # let page 4 settle

            # Start from the very first row of the table.
            print("滚动到表格顶部，确保从第一行开始爬取...")
            await self._scroll_table_to_top()
            await asyncio.sleep(2)

            # === Phase 1: preload all data by scrolling to the bottom ===
            print("\n=== 开始预加载阶段：滚动到底部加载所有数据 ===")

            preload_attempts = 0
            max_preload_attempts = 100
            last_scroll_height = 0

            while preload_attempts < max_preload_attempts:
                preload_attempts += 1

                # Jump to the bottom of the viewport and report metrics.
                scroll_info = await self.page.evaluate('''
                    (() => {
                        const viewport = document.querySelector('.mid-viewport') ||
                                        document.querySelector('.scrollable-cells-viewport') ||
                                        document.querySelector('[role="grid"]').parentElement;
                        if (viewport) {
                            const oldHeight = viewport.scrollHeight;
                            viewport.scrollTop = viewport.scrollHeight;
                            return {
                                scrollHeight: viewport.scrollHeight,
                                scrollTop: viewport.scrollTop,
                                clientHeight: viewport.clientHeight,
                                heightChanged: viewport.scrollHeight !== oldHeight
                            };
                        }
                        return { scrollHeight: 0, scrollTop: 0, clientHeight: 0, heightChanged: false };
                    })()
                ''')

                print(f"预加载第 {preload_attempts} 次: 滚动高度={scroll_info['scrollHeight']}px, 位置={scroll_info['scrollTop']}px")

                # Give the virtualized grid time to fetch more rows.
                await asyncio.sleep(1.5)

                # No growth in scrollHeight means everything is loaded.
                if scroll_info['scrollHeight'] == last_scroll_height:
                    print(f"滚动高度未变化，预加载完成")
                    break

                last_scroll_height = scroll_info['scrollHeight']

            print(f"预加载阶段完成，共进行了 {preload_attempts} 次滚动")

            # === Phase 2: step from the top, ~10 rows at a time ===
            print("\n=== 开始数据提取阶段：从顶部按10行步长提取数据 ===")

            await self._scroll_table_to_top()
            await asyncio.sleep(2)

            # Harvest the initially visible rows before any stepping.
            await self.extract_visible_rows()
            print(f"初始提取: 已获取 {len(self.data)} 行数据")

            scroll_attempts = 0
            max_scroll_attempts = 200
            no_change_count = 0
            max_no_change = 20

            while scroll_attempts < max_scroll_attempts and no_change_count < max_no_change:
                scroll_attempts += 1
                rows_before = len(self.data)

                print(f"第 {scroll_attempts} 次数据提取滚动...")

                # Step down ~10 rows (each row is roughly 17px tall).
                scroll_result = await self.page.evaluate('''
                    (() => {
                        const viewport = document.querySelector('.mid-viewport') ||
                                        document.querySelector('.scrollable-cells-viewport') ||
                                        document.querySelector('[role="grid"]').parentElement;
                        if (viewport) {
                            const stepSize = 170; // 10 rows * ~17px
                            const currentScroll = viewport.scrollTop;
                            const maxScroll = viewport.scrollHeight - viewport.clientHeight;
                            const newScroll = Math.min(currentScroll + stepSize, maxScroll);
                            viewport.scrollTop = newScroll;

                            return {
                                scrolled: true,
                                currentScroll: newScroll,
                                maxScroll: maxScroll,
                                isAtBottom: newScroll >= maxScroll - 20,
                                stepSize: stepSize,
                                rowsScrolled: 10
                            };
                        }
                        return { scrolled: false };
                    })()
                ''')

                if scroll_result['scrolled']:
                    print(f"   滚动10行(步长: {scroll_result['stepSize']}px), 位置: {scroll_result['currentScroll']}/{scroll_result['maxScroll']} {'(已到底部)' if scroll_result['isAtBottom'] else ''}")

                # Wait for the newly exposed rows to render.
                await asyncio.sleep(1.0)

                await self.extract_visible_rows()

                rows_after = len(self.data)
                new_rows = rows_after - rows_before

                # Progress as a percentage of the scrollable range.
                scroll_progress = f"{(scroll_result.get('currentScroll', 0) / max(scroll_result.get('maxScroll', 1), 1) * 100):.1f}%" if scroll_result.get('scrolled') else "N/A"
                print(f"提取进度 {scroll_attempts}/{max_scroll_attempts}: 已提取={rows_after}行, 本轮新增={new_rows}行, 进度={scroll_progress}")

                if new_rows == 0:
                    no_change_count += 1
                    print(f"本轮未发现新数据，无变化次数: {no_change_count}/{max_no_change}")
                else:
                    no_change_count = 0

                # Stop as soon as the viewport reports it reached the bottom.
                if scroll_result.get('isAtBottom', False):
                    print("已滚动到底部，数据提取完成")
                    break

            print(f"第四页数据提取完成，共进行了 {scroll_attempts} 次滚动，总计提取 {len(self.data)} 行数据")

        except Exception as e:
            print(f"提取数据时出错: {e}")

    async def extract_visible_rows(self):
        """Extract the currently rendered rows of the virtualized grid.

        Rows are deduplicated against ``self.processed_rows``; new rows are
        appended to ``self.data``. Rows with an empty Organization Name (the
        first column) are skipped as invalid.
        """
        try:
            # Skip the header row (aria-rowindex="1"); only take data rows
            # that the virtualizer has actually materialized.
            rows = await self.page.query_selector_all('div[role="row"][row-index]:not([aria-rowindex="1"])')

            new_rows_count = 0
            print(f"发现 {len(rows)} 个可见数据行")

            # Process rows in their rendered order.
            for row in rows:
                try:
                    row_index = await row.get_attribute('row-index')
                    aria_rowindex = await row.get_attribute('aria-rowindex')

                    # get_attribute returns None when there is no style
                    # attribute; normalize spaces so "display: none" is also
                    # caught.
                    style = (await row.get_attribute('style') or '').replace(' ', '')
                    if 'display:none' in style or 'visibility:hidden' in style:
                        continue

                    # Main data cells of this row.
                    cells = await row.query_selector_all('div[role="gridcell"].pivotTableCellWrap.cell-interactive.main-cell')

                    if len(cells) >= len(self.headers):
                        row_data = []
                        for i, cell in enumerate(cells[:len(self.headers)]):
                            try:
                                # text_content is more reliable for hidden
                                # overflow; fall back to inner_text.
                                text = await cell.text_content()
                                if text is None:
                                    text = await cell.inner_text()

                                clean_text = text.replace('&nbsp;', '').replace('&amp;', '&').replace('\n', ' ').replace('\t', ' ').strip()
                                row_data.append(clean_text if clean_text else "")
                            except Exception as cell_error:
                                print(f"提取单元格数据时出错: {cell_error}")
                                row_data.append("")

                        # Sanity check: column count must match the header.
                        if len(row_data) != len(self.headers):
                            print(f"数据列数不匹配，期望 {len(self.headers)} 列，实际 {len(row_data)} 列: {row_data}")
                            continue

                        # A valid row must at least have an Organization Name.
                        if not row_data[0] or not row_data[0].strip():
                            continue

                        org_name = row_data[0].strip()

                        # Composite dedup key: name + area + start date.
                        forest_area = row_data[6].strip() if len(row_data) > 6 else ""
                        date_from = row_data[7].strip() if len(row_data) > 7 else ""
                        row_key = (org_name, forest_area, date_from)

                        if row_key not in self.processed_rows:
                            self.processed_rows[row_key] = row_data
                            self.data.append(row_data)
                            new_rows_count += 1

                            cb = row_data[1] if len(row_data) > 1 else "N/A"
                            country = row_data[3] if len(row_data) > 3 else "N/A"
                            print(f"新增数据行 [总计={len(self.data)}] [PBI-row-index={row_index}]: {org_name} | {cb} | {country}")
                        else:
                            forest_area_display = row_data[6] if len(row_data) > 6 else 'N/A'
                            date_from_display = row_data[7] if len(row_data) > 7 else 'N/A'
                            print(f"跳过重复数据: {org_name} | Forest Area: {forest_area_display} | Date From: {date_from_display}")

                except Exception as e:
                    print(f"处理行数据时出错: {e}")
                    continue

            if new_rows_count > 0:
                print(f"本次提取新增 {new_rows_count} 行数据")
            else:
                print("本次提取未发现新数据")

        except Exception as e:
            print(f"提取可见行时出错: {e}")

    async def try_next_page(self):
        """Click the "next page" control if present and enabled.

        Returns True after a successful click, False when no usable button
        was found (last page or unknown layout).
        """
        try:
            # Candidate selectors for the pager's "next" button.
            next_selectors = [
                'button[title*="下一页"]',
                'button[aria-label*="下一页"]',
                'button[title*="Next"]',
                'button[aria-label*="Next"]',
                '.paging button:last-child',
                '[data-automation-id="nextPageButton"]'
            ]

            for selector in next_selectors:
                try:
                    button = await self.page.query_selector(selector)
                    if button:
                        # A bare boolean `disabled` attribute comes back as
                        # "" (falsy), so test for attribute *absence*.
                        is_disabled = await button.get_attribute('disabled')
                        if is_disabled is None:
                            await button.click()
                            await self.page.wait_for_load_state('networkidle')
                            await asyncio.sleep(3)
                            print("成功点击下一页")
                            return True
                except Exception:
                    continue

            print("未找到下一页按钮或已到最后一页")
            return False

        except Exception as e:
            print(f"尝试下一页时出错: {e}")
            return False

    async def save_data(self):
        """Write the collected rows to timestamped CSV and JSON files."""
        if not self.data:
            print("没有数据需要保存")
            return

        # Timestamped filenames so repeated runs never overwrite each other.
        from datetime import datetime
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        csv_filename = f'pbi_data_{timestamp}.csv'
        json_filename = f'pbi_data_{timestamp}.json'

        # CSV (utf-8-sig so Excel detects the encoding).
        try:
            with open(csv_filename, 'w', newline='', encoding='utf-8-sig') as f:
                writer = csv.writer(f)
                writer.writerow(self.headers)
                writer.writerows(self.data)
            print(f"CSV文件保存成功: {csv_filename}")
        except Exception as e:
            print(f"保存CSV文件时出错: {e}")

        # JSON: one object per complete row.
        try:
            json_data = [
                dict(zip(self.headers, row))
                for row in self.data
                if len(row) == len(self.headers)
            ]

            with open(json_filename, 'w', encoding='utf-8') as f:
                json.dump(json_data, f, ensure_ascii=False, indent=2)
            print(f"JSON文件保存成功: {json_filename}")
        except Exception as e:
            print(f"保存JSON文件时出错: {e}")

        # Summary statistics.
        print(f"\n=== 数据统计 ===")
        print(f"总行数: {len(self.data)}")
        print(f"列数: {len(self.headers)}")
        print(f"列名: {', '.join(self.headers)}")

        # Non-empty counts per column.
        for i, header in enumerate(self.headers):
            non_empty_count = sum(1 for row in self.data if i < len(row) and row[i].strip())
            print(f"{header}: {non_empty_count}/{len(self.data)} 非空")

        print(f"\n文件已保存: {csv_filename}, {json_filename}")

    async def close(self):
        """Shut down the browser and the Playwright driver.

        Guards against partially-completed :meth:`setup_browser` so that
        cleanup never raises AttributeError.
        """
        if getattr(self, 'browser', None) is not None:
            await self.browser.close()
        if getattr(self, 'playwright', None) is not None:
            await self.playwright.stop()

async def main():
    """Entry point: open the report, wait for manual login, scrape page 4.

    The browser is opened headed; the user confirms the table is visible
    (pressing Enter) before extraction starts. Data is saved only when at
    least one row was scraped. The browser is always closed in ``finally``.
    """
    # Replace with the actual report URL if it changes.
    url = "https://app.powerbi.com/view?r=eyJrIjoiN2U3NGMyNWEtZTAxNS00MzVhLWExNmMtOThhZjdiYjQ4MWNkIiwidCI6IjEyNGU2OWRiLWVmNjUtNDk2Yi05NmE5LTVkNTZiZWMxZDI5MSIsImMiOjl9"

    print("=== PowerBI 数据爬取工具（仅第四页）===")
    print(f"目标URL: {url}")
    print(f"预期数据列: {', '.join(['Organization Name', 'CB', 'Contact Email', 'Country/Area', 'Evaluation Type', 'Forest Type', 'Forest Area (ha)', 'Date From', 'Date To'])}")
    print("注意：此脚本只会爬取第四页的数据")
    print("\n正在启动浏览器...")

    scraper = OptimizedPBIScraper()

    try:
        await scraper.setup_browser(url)

        print("\n浏览器已打开，请手动完成以下操作：")
        print("1. 登录PowerBI账户（如需要）")
        print("2. 等待页面完全加载")
        print("3. 确认表格数据可见")
        print("\n完成后按回车键开始爬取数据...")
        # Blocks the event loop deliberately: nothing else is running yet.
        input()

        print("\n=== 开始数据爬取（仅第四页）===")
        print("注意：显示的'实际行号'从0开始计数，'PBI-row-index'是PowerBI内部的行索引")
        print("爬取过程中请勿关闭浏览器窗口\n")

        start_time = time.time()
        await scraper.extract_table_data()
        end_time = time.time()

        print(f"\n=== 第四页爬取完成 ===")
        print(f"耗时: {end_time - start_time:.2f} 秒")
        print(f"从第四页共爬取 {len(scraper.data)} 行数据")

        if scraper.data:
            await scraper.save_data()
        else:
            print("未爬取到任何数据，请检查：")
            print("1. 页面是否正确加载")
            print("2. 表格数据是否可见")
            print("3. 网络连接是否正常")

    except KeyboardInterrupt:
        print("\n用户中断了程序执行")
    except Exception as e:
        print(f"\n程序执行出错: {e}")
        print("请检查网络连接和页面状态")

    finally:
        print("\n正在关闭浏览器...")
        try:
            await scraper.close()
            print("浏览器已关闭")
        # Narrowed from a bare `except:` so Ctrl+C is not swallowed here.
        except Exception:
            print("关闭浏览器时出现问题，请手动关闭")
        print("程序结束")

# Run the scraper only when executed as a script (not on import).
if __name__ == "__main__":
    asyncio.run(main())