import json
import os
import time
import requests
from openpyxl import load_workbook
import asyncio
from pyppeteer import launch
from pyppeteer.errors import TimeoutError
import pyppeteer.errors
from lxml import html
import psutil
from urllib.parse import unquote
from huawei_constants import WHITELIST, proxies, headers


def kill_chromium_process(process_name, queue):
    """Terminate every process whose first command-line token contains
    *process_name*, then report completion on *queue*.

    Each match is asked to terminate politely; anything still alive after
    a 3-second grace period is killed forcibly.
    """
    for process in psutil.process_iter(['pid', 'name', 'cmdline']):
        cmdline = process.info['cmdline']
        if not cmdline or process_name not in cmdline[0]:
            continue
        try:
            process.terminate()           # polite shutdown first
            process.wait(timeout=3)       # grace period
            if process.is_running():
                process.kill()            # escalate if it lingers
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is inaccessible -- nothing left to do.
            pass

    queue.put(f"Processes named '{process_name}' have been terminated.")


def get_urls_from_excel(file_path):
    """Collect the values of the column titled '超链接地址' from the first
    worksheet of the workbook at *file_path*.

    Side effect: sets the module-global ``target_index`` to the zero-based
    index of that column (``check_main`` returns it to its caller).

    :param file_path: path to an .xlsx workbook
    :return: list of non-empty cell values from the target column, rows 2+
    """
    global target_index
    workbook = load_workbook(file_path)
    try:
        sheet = workbook.worksheets[0]
        # Locate the target column by scanning the header row.  If the
        # header is absent, target_index ends one past the last header cell
        # (same as the original scan) and the row indexing below raises.
        target_index = 0
        for cell in sheet[1]:
            if cell.value == '超链接地址':
                break
            target_index += 1

        return [row[target_index].value
                for row in sheet.iter_rows(min_row=2)
                if row[target_index].value]
    finally:
        # load_workbook keeps the file handle open; release it explicitly.
        workbook.close()


def check_http_url(urls, queue, test_mode):
    """Run the asynchronous link check over *urls* to completion.

    Uses ``asyncio.run`` (creates and tears down a fresh event loop);
    ``asyncio.get_event_loop()`` outside a running loop has been
    deprecated since Python 3.10, and this file already requires 3.9+
    (``asyncio.to_thread``).
    """
    asyncio.run(http_main(urls, queue, test_mode))


async def http_main(urls, queue, test_mode):
    """Launch the browser, check all *urls* concurrently (at most 10 at a
    time via a semaphore), then close the browser."""
    await init_browser(test_mode, queue)
    # Bound how many check coroutines may run their guarded section at once.
    concurrency_limit = asyncio.Semaphore(10)
    total = len(urls)
    progress = [0]
    await asyncio.gather(*(
        asyncio.create_task(
            check_url_by_browser(concurrency_limit, url, queue, progress, total))
        for url in urls
    ))
    await browser.close()


async def init_browser(mode, queue):
    """
    Create the module-global pyppeteer browser instance.

    :param mode: truthy -> visible (headed) browser for test runs;
        falsy -> headless
    :param queue: progress/status message queue
    :return: None
    """
    global browser
    queue.put(f'Start initializing chromium')
    # Test mode shows the browser window; normal runs stay headless.
    browser = await launch(headless=not mode)


async def status_code(url, queue):
    """Fetch *url* in a worker thread and return the ``requests`` Response.

    On failure the exception object itself is returned (callers probe
    ``.status_code`` and treat its absence as a failed probe).  TLS
    verification is disabled, as in the original request.

    :param url: URL to fetch
    :param queue: status-message queue
    :return: requests.Response on success, otherwise the raised exception
    """
    result = None
    try:
        # Run the blocking requests call off the event loop.
        result = await asyncio.to_thread(requests.get, url=url, verify=False)
    except requests.exceptions.ConnectionError as e:
        queue.put(f"连接错误: {e}")
        result = e
    except requests.exceptions.Timeout as e:
        queue.put(f"请求超时: {e}")
        result = e
    except requests.exceptions.HTTPError as e:
        queue.put(f"HTTP 错误: {e}")
        result = e
    except requests.exceptions.RequestException as e:
        queue.put(f"请求异常: {e}")
        result = e  # bug fix: this branch previously returned None
    except OSError as e:
        queue.put(f"系统错误: {e}")
        result = e
    except ValueError as e:
        queue.put(f"参数错误: {e}")
        result = e
    except Exception as e:
        queue.put(f"未知错误: {e}")
        result = e
    return result


async def check_url_by_browser(semaphore, url, queue, progress_counter, all_tasks):
    """Check one URL: whitelist/cache short-circuit, HTTP status probe,
    browser navigation (with one retry), redirect detection, and -- for
    developer.huawei.com doc links carrying a '#' fragment -- verification
    that the anchor/section/table id actually exists on the page.

    Results are written into the module-global ``result_dict``; progress
    percentages (0-99) and log strings are pushed onto *queue*.

    :param semaphore: limits how many checks run their guarded section at once
    :param url: URL to check
    :param queue: progress/status queue
    :param progress_counter: one-element list used as a mutable shared counter
    :param all_tasks: total URL count, used for the progress percentage
    """
    # Whitelisted URLs pass without any checking.
    if url in WHITELIST:
        queue.put(f'{url}: Pass')
        result_dict[url] = 'Pass'
        progress_counter[0] += 1
        queue.put((progress_counter[0] * 99 // all_tasks))
        return
    # Short-circuit on a result cached by a previous (interrupted) run.
    if result_dict.get(url):
        progress_counter[0] += 1
        queue.put(f'读取缓存结果成功:{url}:{result_dict[url]}')
        queue.put((progress_counter[0] * 99 // all_tasks))
        return

    async with(semaphore):
        progress_counter[0] += 1
        # status_code() yields a requests.Response on success, or an
        # exception object / None on failure.
        response = await status_code(url, queue)
        try:
            return_code = response.status_code
            if return_code != 200:
                result_dict[url] = f'Failed 页面访问失败，返回状态码：{return_code}'
                queue.put(f'Failed 页面访问失败，返回状态码：{return_code}')
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            queue.put(f'{url}状态码：{return_code}')
            await asyncio.sleep(1)
        except Exception as e:
            # response had no .status_code (probe failed); fall through and
            # let the browser-based check decide.
            queue.put(f'{url}获取状态码失败：{e}')

        page = await browser.newPage()
        await page.setViewport({'width': 1920, 'height': 1080})
        try:
            response = await page.goto(url, options={'timeout': 60 * 1000})
            await asyncio.sleep(2)
        except Exception as e:
            # First navigation failed -- retry once before giving up.
            queue.put(f'访问{url}超时，错误原因：{e}')
            try:
                response = await page.goto(url, options={'timeout': 60 * 1000})
            except TimeoutError as e:
                queue.put(f'{url}：Failed 页面加载超时{e}，请检查')
                result_dict[url] = 'Failed 页面加载超时，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            except pyppeteer.errors.PageError as e:
                queue.put(f'{url}：Failed {e}，请检查')
                result_dict[url] = f'Failed {e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            except pyppeteer.errors.NetworkError as e:
                queue.put(f'{url}：Failed {e}，请检查')
                result_dict[url] = f'Failed {e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return

        current_url = page.url
        # Decode percent-escapes so the final and requested URLs compare cleanly.
        unquote_current_url = unquote(current_url, encoding='utf-8')  # decoded final URL
        unquote_url = unquote(url, encoding='utf-8')
        if unquote_current_url != unquote_url:
            # A '/404' path in the landing URL means the site's own
            # not-found page; any other mismatch is treated as a redirect.
            if r'/404' in unquote_current_url:
                result_dict[url] = 'Failed 很抱歉，您访问的页面不存在'
                queue.put(f'{url}：Failed 很抱歉，您访问的页面不存在')
            else:
                result_dict[url] = 'Failed 链接发生了重定向，请检查'
                queue.put(f'{url}：Failed 链接发生了重定向，重定向后的地址为{current_url}请检查')
            await page.close()
            queue.put((progress_counter[0] * 99 // all_tasks))
            return

        # Anchor verification applies only to developer.huawei.com pages
        # with a '#' fragment; the feedback page and codelabs portal are
        # explicitly excluded.
        if url.startswith(
                'https://developer.huawei.com/') and '#' in url and url != 'https://developer.huawei.com/consumer/cn/support/feedback/#/' and not url.startswith(
                'https://developer.huawei.com/consumer/cn/codelabsPortal'):
            try:
                # Wait for the doc page body to render before scraping anchors.
                await page.waitForSelector('h1.doc-title', options={'timeout': 30 * 1000})
            except TimeoutError as e:
                queue.put(f'{url}：Failed 页面加载超时，报错信息：{e}，请检查')
                result_dict[url] = f'Failed 页面加载超时，报错信息：{e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            except pyppeteer.errors.NetworkError as e:
                queue.put(f'{url}：Failed 页面访问网络异常，报错信息：{e}，请检查')
                result_dict[url] = f'Failed 页面访问网络异常，报错信息：{e}，请检查'
                await page.close()
                queue.put((progress_counter[0] * 99 // all_tasks))
                return
            content = await page.content()
            tree = html.fromstring(content)
            # Three candidate anchor sources: section links, li ids, table ids.
            sections = tree.xpath('//div[starts-with(@class, "anchor-list")]/div//div/a')
            li_name = tree.xpath('//div//li[@id]') if "#li" in url or '_li' in url else []
            table_ids = tree.xpath('//div[@class="tbBox"]/table')
            current_sections_href_list = []
            for section in sections:
                section_href = section.get('href')
                current_sections_href_list.append(section_href)
                if section_href == unquote_url:
                    result_dict[url] = 'Pass'
                    queue.put(f'{url}： Pass')
                    await page.close()
                    queue.put((progress_counter[0] * 99 // all_tasks))
                    return
            for li in li_name:
                li_id = li.get('id')
                # Fragment after the last '#' must match the element id.
                if unquote_url.split('#')[-1] == li_id:
                    result_dict[url] = 'Pass'
                    queue.put(f'{url}： Pass')
                    await page.close()
                    queue.put((progress_counter[0] * 99 // all_tasks))
                    return
            for table in table_ids:
                table_id = table.get('id')
                # NOTE(review): redundant -- unquote_url was already computed above.
                unquote_url = unquote(url, encoding='utf-8')
                if unquote_url.split('#')[-1] == table_id:
                    result_dict[url] = 'Pass'
                    queue.put(f'{url}： Pass')
                    await page.close()
                    queue.put((progress_counter[0] * 99 // all_tasks))
                    return

            queue.put(f'{url}: Failed 未正确跳转到锚点请确认链接中的section id是否正确')
            result_dict[url] = 'Failed 未正确跳转到锚点请确认链接中的section id是否正确'
            await page.close()
            # NOTE(review): unlike every other exit path, no progress
            # percentage is pushed here -- confirm whether that is intended.
            return
        else:
            queue.put(f'{url}: Pass')
            result_dict[url] = 'Pass'
        await page.close()
        queue.put((progress_counter[0] * 99 // all_tasks))


def read_result_cache():
    """Load cached check results from a ``result_cache_<epoch>.json`` file
    in the working directory, if one exists and is less than a day old.

    Filenames whose trailing component is not a parseable epoch timestamp
    are skipped (the original crashed on ``int()`` for such names).

    :return: cached {url: result} dict, or {} when no fresh cache exists
    """
    max_age_seconds = 3600 * 24
    for filename in os.listdir(os.getcwd()):
        if not filename.startswith('result_cache'):
            continue
        try:
            file_time = int(filename.removesuffix('.json').split('_')[-1])
        except ValueError:
            continue  # e.g. a stray 'result_cache_backup.json'
        if time.time() - file_time < max_age_seconds:
            with open(filename, 'r', encoding='utf-8') as f:
                return json.load(f)
    return {}


def delete_result_cache():
    """Remove every ``result_cache*`` file from the working directory after
    a check run has finished with the cache."""
    cwd = os.getcwd()
    cache_files = (name for name in os.listdir(cwd)
                   if name.startswith('result_cache'))
    for name in cache_files:
        os.remove(os.path.join(cwd, name))


def check_main(file_path, queue, test_mode):
    """Entry point: check every hyperlink found in the workbook *file_path*.

    Results accumulate in the module-global ``result_dict`` so that a
    mid-run failure can persist them as a cache for the next run.

    :param file_path: Excel workbook containing a '超链接地址' column
    :param queue: progress/status queue consumed by the caller
    :param test_mode: truthy -> run the browser headed (visible)
    :return: (result_dict, target_index) on success, None on failure
    """
    global result_dict
    result_dict = read_result_cache()  # reuse results cached by a failed run

    http_urls = get_urls_from_excel(file_path)
    unique_http_urls = list(set(http_urls))
    # Check the links asynchronously through the headless browser.
    try:
        check_http_url(unique_http_urls, queue, test_mode)
    except Exception as e:
        # Persist partial results to result_cache_<epoch>.json so the next
        # run can resume -- and report the failure instead of swallowing it.
        queue.put(f'Link check aborted: {e}; partial results cached.')
        current_time = str(int(time.time()))
        delete_result_cache()
        with open(f'result_cache_{current_time}.json', 'w', encoding='utf-8') as f:
            json.dump(result_dict, f, ensure_ascii=False, indent=4)
        return
    # Reap any chromium processes the browser left behind.
    queue.put(100)
    kill_chromium_process("chromium", queue)
    delete_result_cache()
    queue.put(f'The link check is completed successfully.')
    return result_dict, target_index


if __name__ == '__main__':
    # Manual smoke test: load (and discard) any fresh local result cache.
    read_result_cache()
