import asyncio
import os
from urllib.parse import unquote

import pyppeteer.errors
import requests
from lxml import html
from pyppeteer import launch
from pyppeteer.errors import TimeoutError

from URL_WHITELIST import WHITELIST
from global_variable import logging, Var

# Shared mutable state (result dict, excel path, ...) provided by the host app.
var = Var()

# pyppeteer browser viewport size in pixels.
WINDOW_WIDTH, WINDOW_HEIGHT = 1920, 1080
# Requests are made with verify=False below; silence the InsecureRequestWarning spam.
requests.packages.urllib3.disable_warnings()


def classify_url(links):
    """
    Split hyperlinks into HTTP(S) links and markdown-internal links.

    A link counts as markdown-internal when it is a pure anchor (``#...``)
    or contains ``.md`` without an ``http:``/``https`` scheme prefix.

    :param links: iterable of hyperlink strings
    :return: (http_urls, md_urls) — two lists preserving input order
    """
    def _is_md_link(link):
        if link.startswith('#'):
            return True
        has_scheme = link.startswith('http:') or link.startswith('https')
        return '.md' in link and not has_scheme

    md_urls = [link for link in links if _is_md_link(link)]
    http_urls = [link for link in links if not _is_md_link(link)]
    return http_urls, md_urls


def check_md_url(urls, queue):
    """
    Check whether markdown links point to existing files/sections.

    Walks the directory derived from ``var.excel_path`` and validates each
    ``.md`` link: a file-level reference must match an existing file name;
    a section reference (``.md#section``) must additionally contain a
    matching ``<a name="..."></a>`` anchor inside that file.

    :param urls: list of markdown link strings
    :param queue: queue receiving progress/result messages for the UI
    :return: None — results are recorded in ``var.result_dict``
    """
    queue.put(f'Start detecting MD hyperlinks')
    # Map file name -> full path so files found in subdirectories can be
    # opened later (the bare name would resolve against the CWD and fail).
    file_paths = {}
    for root, dirs, files in os.walk(var.excel_path.split('超链接')[0]):
        for name in files:
            file_paths.setdefault(name, os.path.join(root, name))
    for url in urls:
        if '.md' not in url:
            continue
        md_file_name = url.split('.md')[0] + '.md'
        if '.md#' not in url:  # file-level reference to another article
            if md_file_name in file_paths:
                var.result_dict[url] = 'Pass'
                queue.put(f'{url}检测结果： Pass')
            else:
                var.result_dict[url] = 'Failed 引用的其他文章链接请自行检查'
                queue.put(f'{url}检测结果： Failed 引用的其他文章链接请自行检查')
        else:  # reference to a section inside another article
            if md_file_name not in file_paths:
                var.result_dict[url] = 'Failed 引用的其他文章链接请自行检查'
                queue.put(f'{url}检测结果： Failed 引用的其他文章链接请自行检查')
            else:
                section_id = url.split('.md#')[-1]
                section_text = fr'<a name="{section_id}"></a>'
                # Open via the full path recorded during the walk.
                with open(file_paths[md_file_name], 'r', encoding='utf-8') as f:
                    if section_text in f.read():
                        var.result_dict[url] = 'Pass'
                        queue.put(f'{url}检测结果： Pass')
                    else:
                        var.result_dict[url] = f'Failed 引用{md_file_name}中的{section_id}未找到，请检查'
                        queue.put(f'{url}检测结果： Failed 引用{md_file_name}中的{section_id}未找到，请检查')


def check_http_url(urls, test_mode, queue):
    """
    Run the asynchronous HTTP hyperlink check to completion.

    :param urls: list of http(s) link strings
    :param test_mode: browser mode flag forwarded to the async checker
    :param queue: queue receiving progress/result messages
    :return: None
    """
    loop = asyncio.get_event_loop()
    loop.run_until_complete(http_main(urls, test_mode, queue))


def chose_mode():
    """
    Prompt the user until a valid detection mode is entered.

    :return: the chosen mode as a string, '1' (headed) or '2' (headless)
    """
    while True:
        answer = input(
            '请选择检测模式：1-有头模式（协程检测效率高，期间用户操作可能会影响结果正确性），2-无头模式（单线程检测，用户可进行其他操作,但检测时间较长）\n')
        if answer in ('1', '2'):
            return answer
        logging.error('输入错误，请重新输入！！！')


async def init_browser(mode, queue):
    """
    Create the module-level pyppeteer browser instance.

    :param mode: truthy -> headed (visible) browser; falsy -> headless
    :param queue: queue receiving progress messages
    :return: None — the browser is stored in the module global ``browser``
    """
    global browser
    queue.put(f'Start initializing chromium')
    # A truthy mode means the user wants to watch the browser work.
    browser = await launch(headless=not mode)


async def check_url_by_browser(semaphore, url, queue, progress_counter, all_tasks):
    """
    Validate one URL by loading it in the shared pyppeteer browser.

    Records 'Pass' or a 'Failed ...' message into ``var.result_dict[url]``
    and pushes both log strings and integer progress percentages (0-99)
    onto ``queue``.

    :param semaphore: asyncio.Semaphore bounding concurrent checks
    :param url: address to check
    :param queue: queue receiving log strings and progress integers
    :param progress_counter: one-element list used as a shared mutable counter
    :param all_tasks: total number of URLs, used to compute progress percent
    :return: None
    """
    # Whitelisted URLs pass immediately without being loaded at all.
    if url in WHITELIST:
        queue.put(f'{url}: Pass')
        var.result_dict[url] = 'Pass'
        queue.put((progress_counter[0] * 99 // all_tasks))
        return

    async with(semaphore):
        progress_counter[0] += 1
        # Cheap pre-check with requests before spending a browser tab.
        response = await status_code(url, queue)
        try:
            return_code = response.status_code
            if return_code != 200:
                var.result_dict[url] = f'Failed 页面访问失败，返回状态码：{return_code}'
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            queue.put(f'{url}状态码：{return_code}')
            await asyncio.sleep(1)
        except Exception as e:
            # status_code() may return an exception object instead of a
            # Response; reading .status_code then raises and lands here,
            # and we fall through to the browser-based check anyway.
            queue.put(f'{url}获取状态码失败：{e}')
        # Close leftover pages titled '无标题' ("untitled") so the shared
        # browser does not accumulate blank tabs across checks.
        for page in await browser.pages():
            try:
                page_title = await page.title()
                if page_title == '无标题':
                    await page.close()
            except Exception as e:
                pass  # best-effort cleanup; an already-closed page is ignored

        page = await browser.newPage()
        await page.setViewport({'width': WINDOW_WIDTH, 'height': WINDOW_HEIGHT})
        try:
            response = await page.goto(url, options={'timeout': 60 * 1000})
            await asyncio.sleep(2)
        except Exception as e:
            queue.put(f'访问{url}超时，错误原因：{e}')
            # Retry once; classify the second failure by exception type.
            try:
                response = await page.goto(url, options={'timeout': 60 * 1000})
            except TimeoutError as e:
                queue.put(f'{url}：Failed 页面加载超时{e}，请检查')
                var.result_dict[url] = 'Failed 页面加载超时，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            except pyppeteer.errors.PageError as e:
                queue.put(f'{url}：Failed {e}，请检查')
                var.result_dict[url] = f'Failed {e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            except pyppeteer.errors.NetworkError as e:
                queue.put(f'{url}：Failed {e}，请检查')
                var.result_dict[url] = f'Failed {e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return

        current_url = page.url
        # Decode percent-escapes on both sides so a redirect is detected by
        # comparing the canonical (unescaped) forms.
        unquote_current_url = unquote(current_url, encoding='utf-8')  # decoded final URL
        unquote_url = unquote(url, encoding='utf-8')
        if unquote_current_url != unquote_url:
            if r'/404' in unquote_current_url:
                var.result_dict[url] = 'Failed 很抱歉，您访问的页面不存在'
                queue.put(f'{url}：Failed 很抱歉，您访问的页面不存在')
            else:
                var.result_dict[url] = 'Failed 链接发生了重定向，请检查'
                queue.put(f'{url}：Failed 链接发生了重定向，重定向后的地址为{current_url}请检查')
            await page.close()
            queue.put((progress_counter[0] * 99 // all_tasks))
            return

        # Anchor links (#section) on developer.huawei.com doc pages need the
        # target id verified in the rendered DOM; a couple of known URL
        # families are exempted from this check.
        if url.startswith(
                'https://developer.huawei.com/') and '#' in url and url != 'https://developer.huawei.com/consumer/cn/support/feedback/#/' and not url.startswith(
            'https://developer.huawei.com/consumer/cn/codelabsPortal'):
            try:
                # Wait for the doc title so the anchor list has rendered.
                await page.waitForSelector('h1.doc-title', options={'timeout': 30 * 1000})
            except TimeoutError as e:
                queue.put(f'{url}：Failed 页面加载超时，报错信息：{e}，请检查')
                var.result_dict[url] = f'Failed 页面加载超时，报错信息：{e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            except pyppeteer.errors.NetworkError as e:
                queue.put(f'{url}：Failed 页面访问网络异常，报错信息：{e}，请检查')
                var.result_dict[url] = f'Failed 页面访问网络异常，报错信息：{e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            # The anchor target may appear as an anchor-list entry, a named
            # <a> inside a list item, or a table id — accept any of the three.
            content = await page.content()
            tree = html.fromstring(content)
            sections = tree.xpath('//div[starts-with(@class, "anchor-list")]/div//div/a')
            li_name = tree.xpath("//div//ul/li[@id]/a[@name]")
            table_ids = tree.xpath('//div[@class="tbBox"]/table')
            current_sections_href_list = []
            for section in sections:
                section_href = section.get('href')
                unquote_url = unquote(url, encoding='utf-8')
                current_sections_href_list.append(section_href)
                if section_href == unquote_url:
                    var.result_dict[url] = 'Pass'
                    queue.put(f'{url}： Pass')
                    await page.close()
                    queue.put((progress_counter[0] * 99 // all_tasks))
                    return
            for li in li_name:
                name = li.get('name')
                unquote_url = unquote(url, encoding='utf-8')
                if unquote_url.split('#')[-1] == name:
                    var.result_dict[url] = 'Pass'
                    queue.put(f'{url}： Pass')
                    await page.close()
                    queue.put((progress_counter[0] * 99 // all_tasks))
                    return
            for table in table_ids:
                table_id = table.get('id')
                unquote_url = unquote(url, encoding='utf-8')
                if unquote_url.split('#')[-1] == table_id:
                    var.result_dict[url] = 'Pass'
                    queue.put(f'{url}： Pass')
                    await page.close()
                    queue.put((progress_counter[0] * 99 // all_tasks))
                    return

            # No matching anchor/name/table id found anywhere on the page.
            queue.put(f'{url}: Failed 未正确跳转到锚点请确认链接中的section id是否正确')
            var.result_dict[url] = 'Failed 未正确跳转到锚点请确认链接中的section id是否正确'
            await page.close()
            queue.put((progress_counter[0] * 99 // all_tasks))
            return
        else:
            queue.put(f'{url}: Pass')
            var.result_dict[url] = 'Pass'

        await page.close()
        queue.put((progress_counter[0] * 99 // all_tasks))


async def handle_response(response):
    """Return the HTTP status code carried by a pyppeteer response object."""
    return response.status


async def http_main(urls, mode, queue):
    """
    Entry point for the asynchronous hyperlink check.

    Launches the shared browser, checks every URL concurrently (bounded by
    a semaphore), then closes the browser.

    :param urls: list of http(s) link strings
    :param mode: browser mode flag (truthy -> headed)
    :param queue: queue receiving progress/result messages
    :return: None
    """
    await init_browser(mode, queue)
    # Cap the number of coroutines hitting the network at once.
    semaphore = asyncio.Semaphore(5)
    total = len(urls)
    done_counter = [0]
    await asyncio.gather(
        *(check_url_by_browser(semaphore, url, queue, done_counter, total)
          for url in urls))
    await browser.close()


async def status_code(url, queue):
    """
    Fetch ``url`` in a worker thread and return the outcome.

    :param url: address to request
    :param queue: queue receiving error messages
    :return: the ``requests.Response`` on success, the raised exception
             object on failure (callers probe ``.status_code`` inside a
             try/except), or None if nothing was assigned.
    """
    # ``proxies`` may be injected as a module-level global by the host app;
    # fall back to None (direct connection) instead of raising NameError,
    # which previously fired on every call and was swallowed as "未知错误".
    proxy_conf = globals().get('proxies')
    result = None
    try:
        # timeout keeps a dead host from hanging the worker thread forever
        # and makes the Timeout branch below actually reachable.
        result = await asyncio.to_thread(
            requests.get, url=url, proxies=proxy_conf, verify=False, timeout=60)
    except requests.exceptions.ConnectionError as e:
        queue.put(f"Failed: 连接错误: {e}")
        result = e
    except requests.exceptions.Timeout as e:
        queue.put(f"Failed: 请求超时: {e}")
        result = e
    except requests.exceptions.HTTPError as e:
        queue.put(f"Failed: HTTP 错误: {e}")
        result = e
    except requests.exceptions.RequestException as e:
        queue.put(f"Failed: 请求异常: {e}")
        result = e  # was missing: keep behavior consistent with other branches
    except OSError as e:
        queue.put(f"Failed: 系统错误: {e}")
        result = e
    except ValueError as e:
        queue.put(f"Failed: 参数错误: {e}")
        result = e
    except Exception as e:
        queue.put(f"Failed: 未知错误: {e}")
        result = e
    return result
