import asyncio
import csv
import json
from playwright.async_api import async_playwright
import time
from typing import List, Dict

class PBITableScraper:
    """Scrape tabular data from a Power BI (PBI) report page with Playwright.

    Typical workflow:
        1. ``await setup_browser(url)``   — launch Chromium and open the page
        2. ``await scrape_all_data()``    — scroll/paginate and collect rows
        3. ``await save_to_csv()`` / ``await save_to_json()``
        4. ``await close()``

    Collected rows accumulate in ``self.data``; column names in ``self.headers``.
    """

    def __init__(self):
        # Accumulated data rows (list of cell-text lists) and column headers.
        self.data: List[List[str]] = []
        self.headers: List[str] = []
        self.page = None
        # Initialize the lazily-created Playwright handles up front so that
        # close() is safe to call even if setup_browser() was never invoked
        # or failed part-way through (main() calls close() in a finally).
        self.playwright = None
        self.browser = None
        self.context = None

    async def setup_browser(self, url: str):
        """Launch a visible Chromium browser and navigate to *url*.

        Runs headed (``headless=False``) so the user can complete any manual
        login/authentication in the opened window.
        """
        self.playwright = await async_playwright().start()
        self.browser = await self.playwright.chromium.launch(headless=False)
        self.context = await self.browser.new_context()
        self.page = await self.context.new_page()

        # PBI reports can render slowly; allow a generous default timeout.
        self.page.set_default_timeout(30000)

        await self.page.goto(url)
        await self.page.wait_for_load_state('networkidle')

    async def extract_headers(self) -> List[str]:
        """Extract the table's column headers into ``self.headers``.

        Returns the list of header texts, or ``[]`` on failure.
        """
        try:
            # Wait until at least one column header is present.
            await self.page.wait_for_selector('[role="columnheader"]', timeout=10000)

            header_elements = await self.page.query_selector_all('[role="columnheader"].pivotTableCellWrap')

            headers = []
            for header in header_elements:
                text = await header.inner_text()
                # Strip sort-hint text and stray non-breaking-space entities.
                clean_text = text.replace('可以排序', '').replace('&nbsp;', '').strip()
                if clean_text:
                    headers.append(clean_text)

            self.headers = headers
            print(f"提取到表格头部: {self.headers}")
            return headers

        except Exception as e:
            print(f"提取表格头部时出错: {e}")
            return []

    async def extract_current_page_data(self) -> List[List[str]]:
        """Extract all currently-rendered data rows.

        Returns a list of rows (each a list of cell texts); ``[]`` on failure.
        """
        try:
            # Wait for at least one data row to be rendered.
            await self.page.wait_for_selector('[role="row"][row-index]', timeout=10000)

            rows = await self.page.query_selector_all('[role="row"][row-index]')

            # Drop the header row (aria-rowindex == "1").
            data_rows = []
            for row in rows:
                aria_rowindex = await row.get_attribute('aria-rowindex')
                if aria_rowindex != "1":
                    data_rows.append(row)
            rows = data_rows

            page_data = []
            for row in rows:
                cells = await row.query_selector_all('[role="gridcell"].pivotTableCellWrap')

                row_data = []
                for cell in cells:
                    text = await cell.inner_text()
                    row_data.append(text.replace('&nbsp;', '').strip())

                # Keep only rows with at least one non-empty cell.
                if row_data and any(cell.strip() for cell in row_data):
                    page_data.append(row_data)

            print(f"当前页面提取到 {len(page_data)} 行数据")
            return page_data

        except Exception as e:
            print(f"提取当前页面数据时出错: {e}")
            return []

    async def scroll_to_load_more(self) -> bool:
        """Scroll the table viewport to the bottom to trigger lazy loading.

        Returns True if rows are present after scrolling, False otherwise.
        NOTE(review): this only checks that *some* rows exist, not that *new*
        rows appeared; the dedup in scrape_all_data() compensates.
        """
        try:
            # '.mid-viewport' is the scrollable container of the PBI table.
            table_container = await self.page.query_selector('.mid-viewport')
            if not table_container:
                return False

            await table_container.evaluate('element => element.scrollTop = element.scrollHeight')

            # Give the virtualized table time to render newly-loaded rows.
            await asyncio.sleep(2)

            current_rows = await self.page.query_selector_all('[role="row"][row-index]')
            return len(current_rows) > 0

        except Exception as e:
            print(f"滚动加载更多数据时出错: {e}")
            return False

    async def click_next_page(self) -> bool:
        """Try a series of candidate selectors to click a "next page" button.

        Returns True if a button was found, enabled, and clicked; else False.
        """
        try:
            # Candidate selectors; adjust to the actual page markup if needed.
            next_button_selectors = [
                'button[aria-label*="下一页"]',
                'button[aria-label*="Next"]',
                '.pagination button:last-child',
                '[data-testid="next-page"]',
                'button:has-text("下一页")',
                'button:has-text("Next")'
            ]

            for selector in next_button_selectors:
                try:
                    next_button = await self.page.query_selector(selector)
                    if next_button:
                        is_disabled = await next_button.get_attribute('disabled')
                        if not is_disabled:
                            await next_button.click()
                            await self.page.wait_for_load_state('networkidle')
                            await asyncio.sleep(3)
                            return True
                except Exception:
                    # This selector failed (e.g. unsupported syntax); try next.
                    continue

            return False

        except Exception as e:
            print(f"点击下一页时出错: {e}")
            return False

    async def scrape_all_data(self) -> List[List[str]]:
        """Scrape every page of the table: scroll to exhaustion, then paginate.

        Returns the accumulated (deduplicated) rows, also kept in ``self.data``.
        """
        print("开始爬取表格数据...")

        await self.extract_headers()

        page_num = 1
        # Stop scrolling after this many consecutive no-new-data attempts.
        max_scroll_attempts = 10

        while True:
            print(f"正在处理第 {page_num} 页...")

            # Scroll repeatedly until no new rows appear for
            # max_scroll_attempts consecutive attempts.
            scroll_count = 0
            while scroll_count < max_scroll_attempts:
                old_data_count = len(self.data)

                await self.scroll_to_load_more()

                current_data = await self.extract_current_page_data()

                # Deduplicate: virtualized tables re-render the same rows.
                for row in current_data:
                    if row not in self.data:
                        self.data.append(row)

                new_data_count = len(self.data)

                if new_data_count == old_data_count:
                    scroll_count += 1
                else:
                    scroll_count = 0  # progress made — reset the counter

                print(f"当前总数据量: {new_data_count}")
                await asyncio.sleep(1)

            # Current page exhausted; try to advance to the next page.
            if not await self.click_next_page():
                print("没有更多页面，爬取完成")
                break

            page_num += 1

            # Safety valve against an endless pagination loop.
            if page_num > 100:
                print("达到最大页数限制，停止爬取")
                break

        print(f"爬取完成！总共获取 {len(self.data)} 行数据")
        return self.data

    async def save_to_csv(self, filename: str = "pbi_table_data.csv"):
        """Write headers + data to *filename* as CSV (UTF-8 with BOM for Excel)."""
        try:
            with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
                writer = csv.writer(csvfile)

                if self.headers:
                    writer.writerow(self.headers)

                writer.writerows(self.data)

            print(f"数据已保存到 {filename}")

        except Exception as e:
            print(f"保存CSV文件时出错: {e}")

    async def save_to_json(self, filename: str = "pbi_table_data.json"):
        """Write data to *filename* as a JSON list of header-keyed objects.

        Rows whose cell count differs from the header count are skipped.
        """
        try:
            json_data = []

            for row in self.data:
                if len(row) == len(self.headers):
                    json_data.append(dict(zip(self.headers, row)))

            with open(filename, 'w', encoding='utf-8') as jsonfile:
                json.dump(json_data, jsonfile, ensure_ascii=False, indent=2)

            print(f"数据已保存到 {filename}")

        except Exception as e:
            print(f"保存JSON文件时出错: {e}")

    async def close(self):
        """Shut down the browser and the Playwright driver, if they were started."""
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

async def main():
    """Drive the scraper end-to-end: open the page, scrape, persist, clean up."""
    # Replace with the actual PBI report URL before running.
    target_url = "YOUR_PBI_TABLE_URL_HERE"

    scraper = PBITableScraper()

    try:
        # Launch the browser and navigate to the report page.
        await scraper.setup_browser(target_url)

        # Pause so the user can finish any manual login/authentication
        # in the headed browser window before scraping begins.
        print("请在浏览器中完成登录等操作，然后按回车继续...")
        input()

        # Collect every row, then persist in both formats.
        await scraper.scrape_all_data()
        await scraper.save_to_csv()
        await scraper.save_to_json()

    except Exception as e:
        print(f"爬取过程中出现错误: {e}")

    finally:
        # Always release the browser, even after a failure.
        await scraper.close()

# Script entry point: start an event loop and run the async scraping workflow.
if __name__ == "__main__":
    asyncio.run(main())