import re
import asyncio
from urllib.parse import unquote
import pyppeteer.errors
from pyppeteer import launch
from pyppeteer.errors import TimeoutError
from url_whitelist import WHITELIST
import requests
from lxml import html
import psutil

# Silence urllib3's InsecureRequestWarning — all HTTP probes below are made
# with verify=False (see status_code()).
requests.packages.urllib3.disable_warnings()

# Shared verdict map: url -> 'Pass' or a 'Failed ...' message; reset by main().
url_result = {}


def parse_html_to_get_href(html_content, current_url):
    """Extract anchor links from raw HTML.

    Args:
        html_content: raw HTML text of the current article.
        current_url: URL of the page, used to resolve in-page fragments.

    Returns:
        A list of ``[link_text, link_url]`` pairs, in document order.
    """
    anchor_re = re.compile(r'<a\s+href="([^"]*)"[^>]*>([^<]*)</a>')
    links = []
    for target, text in anchor_re.findall(html_content):
        # In-page fragment links ("#...") are resolved against the page URL.
        if target.startswith("#"):
            target = current_url + target
        links.append([text, target])
    return links


def check_http_url(urls):
    """Synchronous entry point: run the async link-check pipeline to completion.

    Args:
        urls: iterable of URL strings to verify.
    """
    # asyncio.run creates/tears down a fresh event loop; get_event_loop()
    # + run_until_complete is deprecated for this use since Python 3.10.
    asyncio.run(http_main(urls))


async def http_main(urls):
    """Check all URLs concurrently through one shared browser instance.

    Args:
        urls: iterable of URL strings; verdicts land in the module-level
            ``url_result`` dict.
    """
    await init_browser()
    # Cap how many checks may hold a browser page simultaneously.
    max_concurrent = 5
    semaphore = asyncio.Semaphore(max_concurrent)
    tasks = [asyncio.create_task(check_url_by_browser(semaphore, url)) for url in urls]
    try:
        await asyncio.gather(*tasks)
    finally:
        # Always release the browser, even when a task raised — otherwise
        # the chromium process is leaked until kill_chromium_process() runs.
        await browser.close()


async def check_url_by_browser(semaphore, url):
    """Verify a single URL and record the verdict in ``url_result``.

    The verdict is either ``'Pass'`` or a ``'Failed ...'`` message (Chinese,
    user-facing — consumed by the caller and left untouched here).

    Args:
        semaphore: asyncio.Semaphore bounding concurrent browser pages.
        url: the URL to check.
    """
    # Whitelisted URLs are trusted and skipped outright.
    if url in WHITELIST:
        url_result[url] = 'Pass'
        return
    async with semaphore:
        # Cheap pre-check: plain HTTP GET; a non-200 status fails fast
        # without spending a browser page on the URL.
        response = await status_code(url)
        try:
            # status_code() may return an exception object instead of a
            # Response; accessing .status_code then raises, is swallowed
            # below, and the browser-based check still proceeds.
            return_code = response.status_code
            if return_code != 200:
                url_result[url] = f'Failed 页面访问失败，返回状态码：{return_code}'
                print(f'Failed 页面访问失败，返回状态码：{return_code}')
                return
            print(f'{url}状态码：{return_code}')
        except Exception as e:
            print(f'{url}获取状态码失败：{e}')

        # Close leftover blank tabs ('无标题' = "untitled") so the shared
        # browser does not accumulate pages across checks.
        for page in await browser.pages():
            try:
                page_title = await page.title()
                if page_title == '无标题':
                    await page.close()
            except Exception as e:
                pass

        page = await browser.newPage()
        await page.setViewport({'width': 1920, 'height': 1080})
        try:
            response = await page.goto(url, options={'timeout': 60 * 1000})
            # Give client-side scripts a moment to settle before inspection.
            await asyncio.sleep(2)
        except Exception as e:
            # First navigation failed — retry once before giving up.
            print(f'访问{url}超时，错误原因{e}')
            try:
                response = await page.goto(url, options={'timeout': 60 * 1000})
            except TimeoutError as e:
                print(f'{url}, Failed 页面加载超时{e}，请检查')
                url_result[url] = 'Failed 页面加载超时，请检查'
                await page.close()
                return
            except pyppeteer.errors.PageError as e:
                print(f'{url}: Failed {e}， 请检查')
                url_result[url] = f'Failed {e}， 请检查'
                await page.close()
                return
            except pyppeteer.errors.NetworkError as e:
                print(f'{url}: Failed {e}， 请检查')
                url_result[url] = f'Failed {e}， 请检查'
                await page.close()
                return

        current_url = page.url
        # Percent-decode both URLs so the comparison is not defeated by
        # %xx escape differences.
        unquote_current_url = unquote(current_url, encoding='utf-8')  # decode %xx escapes
        unquote_url = unquote(url, encoding='utf-8')
        if unquote_url != unquote_current_url:
            # The browser landed somewhere else: either the site's /404 page
            # or a redirect — both count as failures.
            if r'/404' in unquote_current_url:
                url_result[url] = 'Failed 很抱歉，您访问的页面不存在'
                print(f'{url}, Failed 很抱歉，您访问的页面不存在')
            else:
                url_result[url] = 'Failed 链接发生了重定向，请检查'
                print(f'{url}, Failed 链接发生了重定向，请检查')
            await page.close()
            return

        # Fragment links on developer.huawei.com get a deeper check: the
        # anchor target must actually exist in the rendered DOM.
        if url.startswith('https://developer.huawei.com/') and "#" in url:
            try:
                # Wait for the doc page's title element to render first.
                await page.waitForSelector('h1.doc-title', options={'timeout': 30 * 1000})
            except TimeoutError as e:
                print(f'{url}：Failed 页面加载超时，报错信息：{e}，请检查')
                url_result[url] = f'Failed 页面加载超时，报错信息：{e}，请检查'
                await page.close()
                return
            except pyppeteer.errors.NetworkError as e:
                print(f'{url}：Failed 页面访问网络异常，报错信息：{e}，请检查')
                url_result[url] = f'Failed 页面访问网络异常，报错信息：{e}，请检查'
                await page.close()
                return
            content = await page.content()
            tree = html.fromstring(content)
            # Candidate anchor targets: section links from the anchor list,
            # <li id=...> items (only for "#li"/"_li" style fragments), and
            # tables with ids inside .tbBox containers.
            sections = tree.xpath('//div[starts-with(@class, "anchor-list")]/div//div/a')
            li_name = tree.xpath('//div//li[@id]') if "#li" in url or '_li' in url else []
            tabel_ids = tree.xpath('//div[@class="tbBox"]/table')
            current_sections_href_list = []
            for section in sections:
                section_href = section.get('href')
                current_sections_href_list.append(section_href)
                if section_href == unquote_url:
                    url_result[url] = 'Pass'
                    print(f'{url}: Pass')
                    await page.close()
                    return
            for li in li_name:
                li_id = li.get('id')
                if unquote_url.split('#')[-1] == li_id:
                    url_result[url] = 'Pass'
                    print(f'{url}: Pass')
                    await page.close()
                    return
            for tabel in tabel_ids:
                tabel_id = tabel.get('id')
                if unquote_url.split('#')[-1] == tabel_id:
                    url_result[url] = 'Pass'
                    print(f'{url}: Pass')
                    await page.close()
                    return

            # No anchor target matched — the fragment is broken.
            print(f'{url}: Failed 未正确跳转到锚点，请确认链接锚点配置是否正确')
            url_result[url] = 'Failed 未正确跳转到锚点，请确认链接锚点配置是否正确'
            await page.close()
            return
        else:
            url_result[url] = 'Pass'
            print(f'{url}: Pass')
        await page.close()

async def status_code(url):
    """GET *url* on a worker thread and return the ``requests.Response``.

    On any error the caught exception object is returned instead of a
    Response, so callers must access ``.status_code`` defensively.
    SSL verification is disabled (warnings silenced at module import).
    """
    result = None
    try:
        # timeout added so a stalled server cannot hang the worker thread
        # forever; a stall now surfaces through the Timeout branch below.
        result = await asyncio.to_thread(requests.get, url=url, verify=False, timeout=60)
    except requests.exceptions.ConnectionError as e:
        print(f'链接错误：{e}')
        result = e
    except requests.exceptions.Timeout as e:
        print(f'请求超时：{e}')
        result = e
    except requests.exceptions.HTTPError as e:
        print(f'HTTP错误：{e}')
        result = e
    except requests.exceptions.RequestException as e:
        print(f'请求异常：{e}')
        result = e
    except OSError as e:
        print(f'系统错误：{e}')
        result = e
    except ValueError as e:
        print(f'参数错误：{e}')
        result = e
    except Exception as e:
        print(f'未知错误：{e}')
        result = e
    return result


async def init_browser():
    """Launch a pyppeteer chromium instance and store it in the module
    global ``browser`` shared by all checks."""
    global browser
    # NOTE(review): headless=False opens a visible window — presumably
    # deliberate (e.g. to render like a real user); confirm before changing.
    browser = await launch(headless=False)


def kill_chromium_process(process_name):
    """Terminate every process whose first cmdline entry contains *process_name*.

    Used as a safety net for chromium processes that outlive browser.close().

    Args:
        process_name: substring matched against each process's executable path.
    """
    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        cmdline = proc.info['cmdline']
        if not cmdline:
            continue
        if process_name in cmdline[0]:
            try:
                proc.terminate()  # polite SIGTERM first
                try:
                    # BUG FIX: wait(timeout=...) raises TimeoutExpired instead
                    # of returning, so the old `if proc.is_running(): kill()`
                    # fallback was unreachable and the exception propagated.
                    proc.wait(timeout=3)
                except psutil.TimeoutExpired:
                    proc.kill()  # still alive after 3s: force-kill
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                pass  # process already gone or not ours to touch


def main(html_content, current_url):
    """Check every hyperlink found in *html_content* and return the failures.

    Args:
        html_content: raw HTML of the article being checked.
        current_url: URL of the article, used to resolve "#..." fragments.

    Returns:
        A list of ``[title, url, verdict]`` entries for links that did not
        pass; an empty list when all links passed or none were found.
    """
    global url_result
    url_result = {}
    result = parse_html_to_get_href(html_content=html_content, current_url=current_url)
    if not result:
        return result
    urls = [href[1] for href in result]
    check_http_url(urls)
    # Kill any chromium processes that did not exit with browser.close().
    kill_chromium_process('chromium')
    fail_result = []
    # BUG FIX: the old loop used `for url_result in result`, rebinding the
    # declared module global and clobbering the shared verdict dict.
    for entry in result:
        entry.append(url_result[entry[1]])
        if entry[-1] != 'Pass':
            fail_result.append(entry)
    return fail_result


if __name__ == '__main__':
    with open('../test/html/1.html', 'r', encoding='utf-8') as f:
        html_content = f.read()
    # BUG FIX: main() requires current_url (the base for "#..." fragment
    # links); calling main(html_content) alone raised TypeError.
    # TODO(review): set this to the real URL the test HTML was captured from.
    result = main(html_content, current_url='')
    print(result)
    for a in result:
        print(a)
