import asyncio
from os.path import join
import csv
from urllib.parse import urlparse
from scrapy.selector import Selector
import requests
from playwright.sync_api import sync_playwright
from playwright.async_api import async_playwright
from requests import RequestException

# Shared configuration constants
URL = 'https://top.baidu.com/board?tab=realtime'
TIMEOUT = 30000  # 毫秒
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'

def validate_url_format(url: str) -> bool:
    """Return True when *url* has both a scheme and a network location.

    Purely syntactic check -- no network access is performed.
    """
    try:
        parts = urlparse(url)
    except ValueError:
        # urlparse raises ValueError on malformed input; a bare except
        # (as before) would also swallow KeyboardInterrupt etc.
        return False
    return bool(parts.scheme and parts.netloc)

def validate_url(url: str, timeout=5) -> dict:
    """Validate *url* syntactically, then probe it with a HEAD request.

    Args:
        url: URL to validate.
        timeout: seconds to wait for the HEAD request.

    Returns:
        dict with keys:
            is_valid: True when the final response status is 2xx/3xx.
            status_code: HTTP status code, or None when unreachable.
            reason: "OK", an HTTP error string, or the failure reason.
            final_url: URL after following redirects, or None.
    """
    validation = {
        'is_valid': False,
        'status_code': None,
        'reason': None,
        'final_url': None
    }

    # Structural validation first -- avoids a pointless network round trip.
    try:
        result = urlparse(url)
        if not all([result.scheme, result.netloc]):
            validation['reason'] = "Invalid URL structure"
            return validation
    except ValueError:
        # urlparse raises ValueError on malformed input; do not use a
        # bare except, which would also hide unrelated failures.
        validation['reason'] = "Invalid URL format"
        return validation

    # Reachability check: HEAD is cheap and follows redirects to the
    # final URL so we can report where the request actually landed.
    try:
        response = requests.head(url,
                                 timeout=timeout,
                                 allow_redirects=True,
                                 headers={'User-Agent': 'Mozilla/5.0'})

        validation.update({
            'is_valid': 200 <= response.status_code < 400,
            'status_code': response.status_code,
            'final_url': response.url,
            'reason': "OK" if response.ok else f"HTTP Error {response.status_code}"
        })
    except RequestException as e:
        validation['reason'] = str(e)
    print(validation)
    return validation

def sync_get_html(url: str) -> str:
    """Fetch fully rendered HTML for *url* with headless Chromium (sync API).

    Args:
        url: page to load.

    Returns:
        The page HTML after the network goes idle, or "" on any failure.
    """
    try:
        with sync_playwright() as p:
            browser = p.chromium.launch(
                headless=True,  # headless mode
                timeout=TIMEOUT
            )
            try:
                page = browser.new_page(user_agent=USER_AGENT)

                # Wait for network idle so JS-rendered content is present.
                page.goto(url, timeout=TIMEOUT)
                page.wait_for_load_state("networkidle")

                return page.content()
            finally:
                # BUG FIX: the original only closed the browser on the
                # success path, leaking the process when goto/wait raised.
                browser.close()
    except Exception as e:
        print(f"同步请求失败: {str(e)}")
        return ""

async def async_get_html(url: str) -> str:
    """Fetch fully rendered HTML for *url* with headless Chromium (async API).

    Args:
        url: page to load.

    Returns:
        The page HTML after the network goes idle, or "" on any failure.
    """
    try:
        async with async_playwright() as p:
            browser = await p.chromium.launch(
                headless=True,
                timeout=TIMEOUT
            )
            try:
                page = await browser.new_page(user_agent=USER_AGENT)

                # Wait for network idle so JS-rendered content is present.
                await page.goto(url, timeout=TIMEOUT)
                await page.wait_for_load_state("networkidle")

                return await page.content()
            finally:
                # BUG FIX: the original only closed the browser on the
                # success path, leaking the process when goto/wait raised.
                await browser.close()
    except Exception as e:
        print(f"异步请求失败: {str(e)}")
        return ""

def get_html(url, async_mode=False):
    """Fetch rendered HTML for *url*, prepending https:// when no scheme given.

    Args:
        url: target URL; scheme optional.
        async_mode: when True, use the asyncio Playwright path.

    Returns:
        Page HTML string, or '' on invalid URL / fetch failure.
    """
    # startswith() tests the prefix; the original 'in' test would also
    # accept strings that merely *contain* 'http://' somewhere inside.
    if not url.startswith(('http://', 'https://')):
        url = 'https://' + url
    # if validate_url(url)['is_valid'] is False:
    #     return ''
    if not validate_url_format(url):
        return ''
    try:
        if not async_mode:
            print("=== 开始同步请求 ===")
            # BUG FIX: fetch the requested url, not the module constant URL.
            sync_html = sync_get_html(url)
            print(f"同步获取内容长度: {len(sync_html)} 字符")
            return sync_html
        else:
            print("\n=== 开始异步请求 ===")
            # BUG FIX: same here -- the original always fetched URL.
            async_html = asyncio.run(async_get_html(url))
            print(f"异步获取内容长度: {len(async_html)} 字符")
            return async_html
    except Exception as e:
        print(type(e), e)
        return ''

def get_html_data(data_str, html_content: str) -> dict:
    """Extract data from HTML according to a list of extraction rules.

    Args:
        data_str: list of rule dicts, each with keys:
            name: output column name.  Names sharing a prefix before '_'
                  (e.g. item_name, item_price) are grouped under that
                  prefix; a name without '_' forms its own group.
            type: one of 'css', 'xpath', 'regex'.
            search_rule: selector / pattern for that type.
        html_content: HTML document to parse.

    Returns:
        dict keyed by name (or prefix).  For a plain name the value is a
        list of {name: value} dicts; for a prefixed name the value is a
        dict mapping each full name to such a list.  {} on any error.
    """
    try:
        # Pass 1: create one output bucket per name / prefix.
        data_dict = {}
        data_dict_attr = {}  # prefix -> set of full names sharing it
        for item in data_str:
            if not item['name']:
                continue
            if '_' not in item['name']:
                key = item['name']
                if key not in data_dict:
                    data_dict[key] = {}
            else:
                key = item['name'].split('_')[0]
                if key not in data_dict:
                    data_dict[key] = {}
                if key not in data_dict_attr:
                    data_dict_attr[key] = set()
                data_dict_attr[key].add(item['name'])
        # Pass 2: run each rule and collect its matches into the bucket.
        selector = Selector(text=html_content)
        opr_map = {
            'css': selector.css,
            'xpath': selector.xpath,
            'regex': selector.re
        }
        for item in data_str:
            name = item['name']
            if not name:
                continue
            matches = opr_map.get(item['type'], lambda x: None)(item['search_rule'])
            if matches is None:
                continue
            if item['type'] == 'regex' and isinstance(matches, list):
                # selector.re already returns plain strings.
                # BUG FIX: dict((name, v)) iterated the two *strings*
                # character-pair-wise; {name: v} builds the intended
                # one-entry dict per match.
                rows = [{name: value} for value in matches]
            else:
                # css/xpath return SelectorList; .get() extracts the text.
                rows = [{name: sel.get()} for sel in matches]
            if '_' not in name:
                data_dict[name] = rows
            else:
                data_dict[name.split('_')[0]][name] = rows

    except Exception as e:
        print(type(e), e)
        return {}
    print(data_dict)
    return data_dict

def export_csv(data: dict, filepath: str):
    """Write each top-level entry of *data* to <filepath>/<key>.csv.

    Args:
        data: mapping as produced by get_html_data; each value is either
              a list of row dicts, or a dict mapping column name to a
              list of single-entry {column: value} dicts.
        filepath: existing directory to write the CSV files into.

    Returns:
        True when every file was written, False on the first error.
    """
    try:
        for key, value in data.items():
            file_name = join(filepath, key + '.csv')
            if not value:
                continue  # nothing extracted for this key
            if isinstance(value, list):
                # Flat list of row dicts -- header from the first row.
                with open(file_name, 'w', newline='', encoding='utf-8') as csvfile:
                    writer = csv.DictWriter(csvfile, fieldnames=value[0].keys())
                    writer.writeheader()
                    writer.writerows(value)
            else:
                # Dict of columns; columns may differ in length, so pad
                # the short ones with '' up to the longest column.
                with open(file_name, 'w', newline='', encoding='utf-8') as csvfile:
                    writer = csv.DictWriter(csvfile, fieldnames=value.keys())
                    writer.writeheader()
                    # default=0 guards against all-empty columns.
                    tmax = max((len(col) for col in value.values()), default=0)
                    for i in range(tmax):
                        row = {}
                        for col_name, col in value.items():
                            # Each cell is a single-entry {col_name: value} dict.
                            row[col_name] = col[i][col_name] if i < len(col) else ''
                        writer.writerow(row)
            print(f"数据已导出到 {file_name}")
        return True
    except Exception as e:
        print(f"导出CSV文件时出错: {str(e)}")
        return False


def main():
    """Demo: fetch the Baidu realtime hot board, extract and print items."""
    # Synchronous fetch
    sync_html = get_html(URL)
    print(sync_html)

    # Extraction example
    if sync_html:
        # BUG FIX: get_html_data requires the rule list as its first
        # argument; the original one-argument call raised TypeError.
        rules = [
            {'name': 'item_title',
             'type': 'css',
             'search_rule': ('div[class="category-wrap_iQLoo horizontal_1eKyQ"] '
                             'div[class="c-single-text-ellipsis"]::text')},
            {'name': 'item_index',
             'type': 'css',
             'search_rule': ('div[class="category-wrap_iQLoo horizontal_1eKyQ"] '
                             'div[class="hot-index_1Bl1a"]::text')},
        ]
        data = get_html_data(rules, sync_html)
        print(f"提取到的数据: {data}")

    # Asynchronous fetch of the same page
    get_html(URL, async_mode=True)


if __name__ == "__main__":
    # First-time setup: install the browser binaries once by running
    #   playwright install chromium
    main()

    # Example CSS rules for get_html_data on the Baidu hot board:
    #item_title div[class="category-wrap_iQLoo horizontal_1eKyQ"] div[class="c-single-text-ellipsis"]::text
    #item_index div[class="category-wrap_iQLoo horizontal_1eKyQ"] div[class="hot-index_1Bl1a"]::text