# -*- coding:utf-8 -*-

import re
import os
import requests
import copy
import subprocess
import utils
import logging
import traceback

# Root of the docs checkout; scan results report md paths relative to this.
# Left empty here — presumably assigned by the caller before scanning; TODO confirm.
ohos_root_path = ""

# Log format pins down the exact error location: timestamp, logger name, level, message.
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


# 扫描责任田缺失问题
def permission_check(path, gitcode_id, scanResult, resultDic):
    """Check that the PR creator appears in the docs owner list.

    :param path: directory containing the md files
    :param gitcode_id: gitcode account of the PR creator
    :param scanResult: aggregate list of scan findings, appended in place
    :param resultDic: detailed scan statistics, updated in place
    :return: None
    """
    resultList = []
    mdFiles = utils.getMdFile(path)
    if not mdFiles:
        return
    docs_owners_list = utils.getOwners(mdFiles[0])
    owner_list = docs_owners_list["gitcode_id"]

    if gitcode_id not in owner_list:
        resultList.append(
            getResult('permission_check', f'该PR创建者未在Owner名单中，请添加!', ''))

    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        # Bug fix: this previously wrote to "figure_lost" (copy-paste from
        # figureLost), which clobbered the image-scan count in the summary.
        resultDic["fault_summary"]["permission_check"] = len(resultList)


def get_md_owner_info(required_columns, mdPath):
    """Collect ownership-tag info from a markdown document.

    Returns (owner_list, head_headline). owner_list maps each found tag name
    to a dict with:
      - 'owner_info': the tag's raw value,
      - 'line_number': 1-based index among NON-EMPTY lines,
      - 'not_only_key': True when the tag shares its line with other content.
    head_headline is the (non-empty) line number of the first level-1
    heading, or -1 when none exists.
    """
    owner_list = {}
    with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
        # Blank lines are dropped; all reported line numbers refer to this view.
        lines = [line for line in f.readlines() if line.strip()]
    for column in required_columns:
        pattern = f'<!--{column}:(.*?)--> *'
        for index, line in enumerate(lines):
            match = re.search(pattern, line)
            if not match:
                continue
            owner_list[column] = {
                'owner_info': match.group(1),
                'line_number': index + 1,
                # True when the line carries content beyond the tag itself.
                'not_only_key': match.group(0).strip() != line.strip(),
            }
            break
    head_headline = -1
    for index, line in enumerate(lines):
        if re.match('^# .*?\n', line):
            head_headline = index + 1
            break
    return owner_list, head_headline


# 扫描责任田不连续，或者与一级标题之间存在其他内容。0：正常；1，标签之间存在其他内容；2，判断一级标题和责任田之间是否存在内容
def check_owner_line(required_columns, owner_list, head_headline):
    """Check the layout of the ownership tag lines.

    :return: 0 when OK (or when the check is skipped), 1 when other content
             sits between the tags, 2 when content sits between the level-1
             title and the tags.
    """
    arr = []
    for column in required_columns:
        line = owner_list.get(column, {'line_number': -1}).get('line_number')
        # If any tag or the level-1 title is missing, skip the whole check.
        if line < 0 or head_headline < 0:
            return 0
        arr.append(line)
    # Tags sharing one line: skip this check. Bug fix: the original compared
    # against a hard-coded 6, so any column list shorter than 6 entries was
    # silently never checked; compare against the actual column count.
    if len(set(arr)) < len(arr):
        return 0
    sorted_arr = sorted(arr)
    for prev, nxt in zip(sorted_arr, sorted_arr[1:]):
        # Tag lines must be strictly consecutive.
        if nxt != prev + 1:
            print('标签之间存在其他内容')
            return 1
    # The first tag must directly follow the level-1 title line.
    if head_headline + 1 != min(arr):
        return 2
    return 0


# <!--Kit: Performance Analysis Kit-->
# <!--Subsystem: HiviewDFX-->
# <!--Owner: @hello_harmony; @yu_haoqiaida-->
# <!--Designer: @kutcherzhou1-->
# <!--Tester: @gcw_KuLfPSbe-->
# <!--Adviser: @gcw_KuLfPSbe-->
def _check_column_value(column, result, allowed_values, resultList, targetMdpath):
    # Shared Kit/Subsystem value check: flag Chinese characters in the value,
    # then verify the value appears in the corresponding allow-list.
    for content in result:
        if u'\u4e00' <= content <= u'\u9fff':
            resultList.append(
                getResult('owner_lost', f'该文档标记{column}对应值存在中文，请处理！',
                          targetMdpath))
            break
    if result not in allowed_values:
        resultList.append(
            getResult('owner_lost',
                      f'该文档标记{column}对应值：[{result}]不在{column}清单中，请添加!',
                      targetMdpath))


def owner_lost(path, code_branch, scanResult, resultDic):
    """Scan zh-cn application-dev docs (master branch only) for missing or
    invalid ownership tags (Kit/Subsystem/Owner/Designer/Tester/Adviser).

    :param path: directory containing the md files
    :param code_branch: branch under scan; only 'master' is checked
    :param scanResult: aggregate list of scan findings, appended in place
    :param resultDic: detailed scan statistics, updated in place
    :return: None
    """
    resultList = []
    mdFiles = utils.getMdFile(path)
    if not mdFiles:
        return
    docs_owners_list = utils.getOwners(mdFiles[0])
    kit_list = docs_owners_list["Kit"]
    subsystem_list = docs_owners_list["Subsystem"]
    # Tags every document must declare.
    required_columns = ['Kit', 'Subsystem', 'Owner', 'Designer', 'Tester', 'Adviser']
    for mdPath in mdFiles:
        if check_file_content_is_empty(mdPath):
            continue
        if '/en/' in mdPath:
            continue
        if '/zh-cn/application-dev/' not in mdPath or code_branch != 'master':
            continue
        # Computed before the try so the except handlers can always report it.
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            # All ownership tags present in the document.
            owner_list, head_headline = get_md_owner_info(required_columns, mdPath)
            # Layout check: content between tags / between title and tags.
            check_status = check_owner_line(required_columns, owner_list, head_headline)
            if check_status == 1:
                resultList.append(
                    getResult('owner_lost', f'该文档责任田标签之间存在其他内容，请处理！',
                              targetMdpath))
            if check_status == 2:
                resultList.append(
                    getResult('owner_lost', f'该文档一级标题和责任田标签之间存在其他内容，请处理！',
                              targetMdpath))
            for column in required_columns:
                column_info = owner_list.get(column)
                if not column_info:
                    resultList.append(
                        getResult('owner_lost', f'该文档缺失{column}，请补充！', targetMdpath))
                    continue
                result = column_info['owner_info'].strip()
                if result == '' or str.isspace(result):
                    resultList.append(
                        getResult('owner_lost', f'该文档标记{column}为空，请添加!', targetMdpath))
                    continue
                if column_info['not_only_key']:
                    resultList.append(
                        getResult('owner_lost', f'该文档标记{column}所在行还存在其他内容，请处理！',
                                  targetMdpath))
                if column == 'Kit':
                    _check_column_value(column, result, kit_list, resultList, targetMdpath)
                if column == 'Subsystem':
                    _check_column_value(column, result, subsystem_list, resultList, targetMdpath)
                # Person tags: validate the gitcode account format.
                if column in ('Owner', 'Designer', 'Tester', 'Adviser'):
                    id_status = utils.check_gitcode_id(result)
                    if id_status == 1:
                        resultList.append(
                            getResult('owner_lost',
                                      f'该文档标记{column}，[{result}]账号前未添加@，或格式不符合gitCode账号要求，或多个账号之间未使用分号进行分隔等其他格式问题，请修改!',
                                      targetMdpath))
                    elif id_status == 2:
                        resultList.append(
                            getResult('owner_lost', f'该文档标记{column}为空，请添加!', targetMdpath))
                    elif id_status == 3:
                        resultList.append(
                            getResult('owner_lost',
                                      f'该文档标记{column}中责任人数量超过3个，请去除!',
                                      targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError:
            # Bug fix: this finding was mislabeled 'figure_lost' (copy-paste
            # from the image scan); it belongs to the owner scan.
            resultList.append(getResult('owner_lost', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["owner_error"] = len(resultList)


# 扫描图片缺失问题
def figureLost(path, scanResult, resultDic):
    '''
    Scan markdown files under *path* for image references whose target file
    does not exist, plus empty image references.

    :param path: directory containing the md files
    :param scanResult: aggregate list of scan findings, appended in place
    :param resultDic: detailed scan statistics, updated in place
    :return: None
    '''
    resultList = []
    for mdPath in utils.getMdFile(path):
        if check_file_content_is_empty(mdPath):
            continue
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            # Skip files explicitly excluded from scanning.
            if utils.if_no_check(mdPath):
                continue
            with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
                data = f.read()
            # Relative-path images without alt text: ![](x.jpg) / ![](x.png).
            pattern1 = re.compile(r'\!\[\]\((?!http).+?\.jpg\)|\!\[\]\((?!http).+?\.png\)')
            # Empty image references: ![]().
            pattern2 = re.compile(r'\!\[\]\(\)')
            # Relative-path images with alt text: ![alt](x.jpg) / ![alt](x.png).
            pattern3 = re.compile(r'\!\[[\S]+?\]\((?!http).+?\.jpg\)|\!\[[\S]+?\]\((?!http).+?\.png\)')
            urlArray = re.findall(pattern1, data) + re.findall(pattern3, data)
            errorUrlArray = re.findall(pattern2, data)
            baseDirParts = mdPath.split('/')[:-1]
            # Exclusions: parent-relative paths and concatenated/table matches.
            pattern4 = re.compile(r'\.\.|\)\s*\!\[\]\(|\)\s*\|\s*\!\[')
            for originurl in urlArray:
                if not originurl or re.findall(pattern4, originurl):
                    continue
                # Resolve the image path relative to the md file's directory.
                relParts = originurl.split('](')[1].split('/')
                figureUrl = '/'.join(baseDirParts + relParts).rstrip(')')
                if not os.path.exists(figureUrl):
                    resultList.append(getResult('figure_lost', originurl, targetMdpath))
            for errorUrl in errorUrlArray:
                resultList.append(getResult('figure_lost', errorUrl, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError:
            resultList.append(getResult('figure_lost', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["figure_lost"] = len(resultList)


# 扫描相对路径断链问题
def relaLinkErrorNew(path, scanResult, resultDic):
    '''
    Scan markdown files for broken relative links and in-page anchors.

    :param path: directory containing the md files
    :param scanResult: aggregate list of scan findings, appended in place
    :param resultDic: detailed scan statistics, updated in place
    :return: None
    '''
    resultList = []
    mdFiles = utils.getMdFile(path)
    for mdFile in mdFiles:
        if check_file_content_is_empty(mdFile):
            continue
        targetMdpath = os.path.relpath(mdFile, ohos_root_path)
        try:
            # Skip files explicitly excluded from scanning.
            if utils.if_no_check(mdFile):
                continue
            with open(mdFile, 'r', encoding='utf-8', errors='ignore') as f:
                data = f.read()
            if not data:
                # Bug fix: this was `break`, which aborted the scan of ALL
                # remaining files as soon as one empty document was hit.
                continue
            # Strip code blocks so code comments are not parsed as headings.
            data = utils.get_data_without_code(data)

            linkReg = re.compile(
                r'(?<![!\\])\[((?:\[(?:\\.|[^\n\[\]\\])*\]|\\.|`[^\n`]*`|[^\n\[\]\\`])*?)\]\(([^\n]*?)\)')
            urlArray = re.finditer(linkReg, data)
            # Anchor ids defined in the current document.
            idTotal = getMdIdSet(mdFile)
            idTotalSet = idTotal.keys()
            for itemUrl in urlArray:
                text = itemUrl.group(1)
                href = itemUrl.group(2).strip()
                originUrl = itemUrl.group()
                # Skip empty link text and absolute http(s) links.
                if not text.strip() or (href and href.startswith(('http://', 'https://'))):
                    continue
                fileName, ext = os.path.splitext(href)
                # A relative link must carry a file extension.
                if not href.startswith('#') and not ext:
                    resultList.append(getResult('链接不规范', originUrl, targetMdpath))
                    continue
                # At most one '#' is allowed in a link.
                if href.count('#') > 1:
                    resultList.append(getResult('链接不规范', originUrl, targetMdpath))
                    continue
                if href.startswith('#'):
                    # In-page anchor: must exist and not target a level-1 heading.
                    sectionId = href[1:]
                    if not (sectionId in idTotalSet):
                        resultList.append(getResult('link_error', originUrl, targetMdpath))
                        continue
                    if sectionId in idTotalSet and idTotal.get(sectionId, -1) == 1:
                        resultList.append(getResult('不可链接到一级标题', originUrl, targetMdpath))
                        continue
                elif href:
                    # Cross-document relative link (optionally with an anchor).
                    error_Type = dealRelativeLink(href, mdFile)
                    if error_Type == 1:
                        resultList.append(getResult('link_error', originUrl, targetMdpath))
                    if error_Type == 2:
                        resultList.append(getResult('不可链接到一级标题', originUrl, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError:
            resultList.append(getResult('link_error', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["link_error"] = len(resultList)


# 扫描文档中示例代码部分缩进异常
def md_style_check_code_space(mdPath, resultList, targetMdpath):
    """Flag fenced code blocks containing lines indented less than the
    opening ``` fence (broken indentation inside the block).

    :param mdPath: markdown file to scan
    :param resultList: findings are appended here via getResult
    :param targetMdpath: file path reported in the findings
    """
    with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
        data = f.read()
    pattern = re.compile(r' *```[\s\S]+?```', re.DOTALL)
    for match in pattern.finditer(data):
        # 1-based line number of the opening fence.
        start_line_number = data.count('\n', 0, match.start(0)) + 1
        lines = match.group().splitlines()
        # The fence line's indentation is the minimum allowed inside the block.
        code_space_count_min = len(lines[0]) - len(lines[0].lstrip(" "))
        lens_count = len(lines)
        for line in lines:
            # Blank lines are not checked.
            if line.lstrip(" ") == '':
                continue
            if len(line) - len(line.lstrip(" ")) < code_space_count_min:
                # Bug fix: the original message dropped the trailing "行"
                # after the end line number.
                resultList.append(getResult('md_style_error',
                                            f'错误类型：代码块缩进异常；异常代码块所在位置行号：第 {start_line_number} 行 至 第 {start_line_number + lens_count - 1} 行。请处理！',
                                            targetMdpath))
                break


# 通过markdownlint检查：1、序列号格式异常 2、代码块未指定语言 3、代码块格式错误
def run_markdownlint(mdPath, resultList, targetMdpath):
    """Run markdownlint-cli2 on one markdown file and record violations of
    MD029 (ordered-list prefix), MD040 (fenced code language) and MD046
    (code block style) as scan findings."""
    error_labels = {
        '029': '序列号格式异常',
        '040': '代码块未指定语言',
        '046': '代码块格式错误',
    }
    config_path = os.path.join(os.getcwd(), '.markdownlint.jsonc')
    command = ["npx", "markdownlint-cli2", "--config", config_path, mdPath]

    # Run the lint tool without raising on a non-zero exit code.
    result = subprocess.run(command, capture_output=True, text=True, check=False)
    # returncode 1 means lint violations were reported on stderr.
    if result.returncode != 1:
        return
    for line in result.stderr.splitlines():
        error_line_number, error_type = parse_md_style_error(line)
        label = error_labels.get(error_type)
        if label:
            resultList.append(getResult('md_style_error',
                                        f'错误类型：{label}；异常所在行号：第 {error_line_number} 行。请处理！',
                                        targetMdpath))

# 获取markdownlint工具返回的错误类型及为行号。
def parse_md_style_error(line):
    """Extract (line number, MD rule number) from one markdownlint output line.

    Both values are returned as digit strings when found; a 0 placeholder is
    returned for each part that does not match."""
    line_match = re.search(re.compile(r'\.md:(\d+)'), line)
    rule_match = re.search(re.compile(r'MD(\d+)'), line)
    error_line_number = line_match.group(1) if line_match else 0
    error_type = rule_match.group(1) if rule_match else 0
    return error_line_number, error_type


# 检查文档中的"说明"、"注意"是否符合写作规范
def notice_explanation(mdPath, resultList, targetMdpath):
    """Check that bolded admonitions (说明/注意/警告/Note/Caution/Warning)
    follow the writing guide, both inside table cells and as standalone
    '>' quote lines.

    :param mdPath: markdown file to scan
    :param resultList: findings are appended here via getResult
    :param targetMdpath: file path reported in the findings
    """
    support_text = ["说明", "注意", "警告", "Note", "note", "Caution", "caution", "Warning", "warning"]
    # Open the target document
    with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
        lines = f.readlines()
        for i in range(len(lines)):
            line = lines[i].strip()
            if any(f"**{keyword}" in line for keyword in support_text):
                if re.match(r'^\s*\|.*', line):
                    if i + 1 < len(lines):
                        next_line = lines[i + 1]
                        # A separator row right below means this is a table
                        # header row, not an admonition cell: skip it.
                        if re.match(r'^\s*\|([-: ]+\|\s*)+$', next_line):
                            continue
                    # Inside a table cell, **说明：**/**注意：** must be
                    # immediately preceded by a <br> or <br/> tag.
                    if not re.search(
                            r'(<br\s*/?>\s*\*\*(说明|注意|警告|[Nn]ote|[Cc]aution|[Ww]arning)(\s*：|\s*:)?\*\*(：|:)?\s*)',
                            line):
                        resultList.append(getResult('notice_explanation',
                                                    f"第{i + 1}行表格总说明或注意格式不符合写作规范，建议使用<br>**说明：**；错误信息：提示语前后紧跟换行标签，错误行{line}",
                                                    targetMdpath))
                else:
                    # "注意事项" section headings are not admonitions.
                    if '注意事项' in line:
                        continue
                    # Outside tables, the admonition must be a standalone '>'
                    # line, either ending the line or followed by <br>/<br/>.
                    if not re.match(
                            r'^(>\s*\*\*(说明|注意|警告|[Nn]ote|[Cc]aution|[Ww]arning)(\s*：|\s*:)?\*\*(：|:)?\s*(?=<br>|<br/>))',
                            line) and not re.match(
                        r'^(>\s*\*\*(说明|注意|警告|[Nn]ote|[Cc]aution|[Ww]arning)(\s*：|\s*:)?\*\*(：|:)?\s*)$', line):
                        resultList.append(getResult('notice_explanation',
                                                    f"第{i + 1}行提示语错误；错误信息：提示语应为独立一行、结尾紧跟换行标签，以“>”开头，错误行{line}",
                                                    targetMdpath))


# 检查表格中的格式问题
def is_valid_markdown_table(table):
    """Validate one markdown table candidate.

    :return: (line_count, position, is_valid, info). position is the 0-based
             offset of the offending row within the table, or -1 when the
             problem concerns the table as a whole.
    """
    position = 0

    lines = table.strip().split('\n')
    lines_count = len(lines)

    if re.search(r'\t', table):
        return lines_count, -1, False, "存在制表符"

    # type Action = 'cut' | 'copy' | 'paste' |
    # Multi-line type unions look like tables and must be filtered out.
    if re.search(r'^type ([^\n]+) = ', lines[0]):
        return lines_count, position, True, 'type 类型 过滤掉，不检测'

    # Robustness fix: a single-line match cannot be a table; the original
    # raised IndexError on lines[1] below. Treat it as a false positive.
    if lines_count < 2:
        return lines_count, position, True, "误报"

    if lines_count < 3:
        if re.search(r'\s*\|(\s*[-:]+\s*\|)+\s*$', lines[1]):
            return lines_count, -1, False, "表格至少需要三行（标题行、分隔符行和数据行）"
        else:
            return lines_count, position, True, "误报"

    # Mask escaped \| rather than deleting it: a column may contain only \|.
    lines = [s.replace('\\|', '管道符转义后内容') for s in lines]

    # Check the header row.
    header = lines[0]

    if re.search(r'\|\s*?\|', header):
        header = header.replace('管道符转义后内容', '\\|')
        return lines_count, position, False, f"标题行格式不正确，存在空列: {header}"

    if not re.match(r'^\s*\|(.*\|){1,20}\s*$', header):
        header = header.replace('管道符转义后内容', '\\|')
        return lines_count, position, False, f"标题行格式不正确: {header}"

    if re.match(r'^\s*\|(\s*[-:]+\s*\|)+\s*$', header):
        return lines_count, -1, False, "缺少标题行"

    position += 1
    # Check the separator row.
    separator = lines[1]
    if not re.match(r'^\s*\|(\s*[-:]+\s*\|)+\s*$', separator):
        separator = separator.replace('管道符转义后内容', '\\|')
        return lines_count, -1, False, f"缺少分隔符行、或分隔符行格式不正确，分隔符行为: {separator}"

    # Header and separator must declare the same number of columns.
    header_columns = re.split(r'\s*\|\s*', header.strip().strip('|'))
    separator_columns = re.split(r'\s*\|\s*', separator.strip().strip('|'))

    if len(header_columns) != len(separator_columns):
        return lines_count, -1, False, "标题行和分隔符行的列数不一致"

    # Check the data rows.
    for line in lines[2:]:
        position += 1
        if re.search(r'\|\s*?\|', line):
            line = line.replace('管道符转义后内容', '\\|')
            return lines_count, position, False, f"数据行格式不正确，存在空列: {line}"

        if not re.match(r'^\s*\|.*\|\s*$', line):
            line = line.replace('管道符转义后内容', '\\|')
            return lines_count, position, False, f"数据行格式不正确: {line}"

        data_columns = re.split(r'\s*\|\s*', line.strip().strip('|'))
        if len(data_columns) != len(header_columns):
            line = line.replace('管道符转义后内容', '\\|')
            return lines_count, -1, False, f"数据行的列数与标题行不一致: {line}"

    return lines_count, position, True, "表格格式正确"


# 扫描表格中格式问题
def table_scan(mdPath, resultList, targetMdpath):
    """Locate markdown tables in a document and report formatting problems
    found by is_valid_markdown_table, with 1-based source line positions."""
    table_pattern = re.compile(r'(((\n?\s*\|?(?!#).*\|\n)|(\n\s*\|.*\|?\n))\|?.*\|.*\n(\|?.*\|.*\n?)*)')
    code_block_pattern = re.compile(r'(?s)```.*?```', re.DOTALL)
    with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
        data = f.read()
    # Blank out code blocks (preserving line count) so code is not
    # misidentified as a table.
    scrubbed = code_block_pattern.sub(lambda m: '\n'.join(['-'] * (m.group(0).count('\n') + 1)), data)
    for match in table_pattern.finditer(scrubbed):
        table, num = remove_blank_lines(match.group(0))
        lines_count, position, is_valid, info = is_valid_markdown_table(table)
        if is_valid:
            continue
        start_line = scrubbed[:match.start()].count('\n') + num + 1
        if position == -1:
            # position -1 flags a whole-table problem: report the full range.
            resultList.append(
                getResult('table_scan',
                          f'表格范围：第{start_line}行 - 第{start_line + lines_count - 1}行，错误信息：{info}',
                          targetMdpath))
        else:
            resultList.append(
                getResult('table_scan', f'第{start_line + position}行表格错误，错误信息：{info}',
                          targetMdpath))


# 表格去除空行
def remove_blank_lines(table):
    """Drop blank lines from a table string.

    :return: (cleaned_text, blank_line_count) where cleaned_text joins the
             remaining lines with newlines."""
    all_lines = table.splitlines()
    kept = [item for item in all_lines if item.strip() != '']
    return '\n'.join(kept), len(all_lines) - len(kept)


# 扫描@link
def text_link_error(mdFile, resultList, targetMdpath):
    """Report {@link ...} occurrences that are neither a valid markdown
    anchor link nor the empty form."""
    pattern = re.compile(r'{@link.*?}')
    with open(mdFile, 'r', encoding='utf-8', errors='ignore') as f:
        lines = f.readlines()
    data = ''.join(lines)
    for match in pattern.finditer(data):
        res = match.group(0)
        # Accepted forms: '{@link [text](#anchor)}' and '{@link}'.
        if re.match(r'{@link\s*\[.*\]\(#.*\)}', res) or re.match(r'{@link}', res):
            continue
        line_number = get_lines_number(match, lines)
        resultList.append(
            getResult('text_link_error', f'第{line_number}行链接错误；错误信息：{res}', targetMdpath))


# 扫描html标签
def scan_html(mdFile, resultList, targetMdpath):
    """Report malformed <br> and <sup> HTML fragments in a markdown file."""
    all_br_pattern = re.compile(r'<?/?\s*br\s*/?\s*>?')
    all_sub_pattern = re.compile(r'<?sup\s*>?.*<?/?\s*sup\s*/?>?')
    with open(mdFile, 'r', encoding='utf-8', errors='ignore') as f:
        lines = f.readlines()
    data = ''.join(lines)

    for match in all_br_pattern.finditer(data):
        br_tag = match.group(0)
        # Well-formed <br>/</br>/<br/> are fine; candidates without any
        # angle bracket are plain text, not tags.
        if re.search(r'</?\s*br\s*/?\s*>', br_tag) or not re.search(r'.*[<>].*', br_tag):
            continue
        line_number = get_lines_number(match, lines)
        resultList.append(
            getResult('scan_html_error', f'第{line_number}行br错误；错误信息：{br_tag}', targetMdpath))

    for match in all_sub_pattern.finditer(data):
        sup_tag = match.group(0)
        # A closing sup fragment without a matching well-formed pair.
        if re.search(r'</?\s*sup>', sup_tag) and not re.search(r'<sup\s*>(.*?)</?\s*sup\s*/?>', sup_tag):
            line_number = get_lines_number(match, lines)
            resultList.append(
                getResult('scan_html_error', f'第{line_number}行sup错误；错误信息：{sup_tag}', targetMdpath))


# 检查markdown中的格式问题
def mdStyleCheck(path, scanResult, resultDic):
    '''
    Run all markdown style checks over the md files under *path*.

    :param path: directory containing the md files
    :param scanResult: aggregate list of scan findings, appended in place
    :param resultDic: detailed scan statistics, updated in place
    :return: None
    '''
    resultList = []
    for mdFile in utils.getMdFile(path):
        try:
            targetMdpath = os.path.relpath(mdFile, ohos_root_path)
            # Skip empty documents and documents excluded from scanning.
            if check_file_content_is_empty(mdFile) or utils.if_no_check(mdFile):
                continue
            md_style_check_code_space(mdFile, resultList, targetMdpath)
            # run_markdownlint(mdFile, resultList, targetMdpath)
            mdLinkStyleError(mdFile, resultList, targetMdpath)
            notice_explanation(mdFile, resultList, targetMdpath)
            table_scan(mdFile, resultList, targetMdpath)
            text_link_error(mdFile, resultList, targetMdpath)
            scan_html(mdFile, resultList, targetMdpath)
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError:
            resultList.append(getResult('md_style_error', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
        except Exception:
            # Boundary handler: log the traceback and continue with the next file.
            error_message = traceback.format_exc()
            logger.error(f'An error occurred: {error_message}，问题文档{targetMdpath}')
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["md_style_error"] = len(resultList)


def dealRelativeLink(url, curFilePath):
    """Validate a relative markdown link found in curFilePath.

    :return: 0 when the target (and optional #anchor) resolves, 1 when the
             target file or anchor is missing, 2 when the anchor points at a
             level-1 heading (which must not be linked).
    """
    baseDir = os.path.dirname(curFilePath)
    targetFullPath = os.path.normpath(os.path.join(baseDir, url)).replace('\\', '/')
    targetFile = targetFullPath.split('#')[0]

    if not os.path.exists(targetFile):
        return 1
    if '#' not in url:
        return 0

    anchor = targetFullPath.split('#')[-1]
    headerIds = getMdIdSet(targetFile)
    if anchor not in headerIds.keys():
        return 1
    if headerIds.get(anchor, -1) == 1:
        return 2
    return 0


def getMdIdSet(filePath):
    """Return {section_id: heading_level} for every heading in a markdown file."""
    with open(filePath, 'r', encoding='utf-8') as sectionFile:
        data = sectionFile.read()
    if not data:
        return {}
    # Strip code blocks first so code comments are not mistaken for headings.
    data = utils.get_data_without_code(data)
    return getSectionIdList(data)


def getSectionIdList(data):
    """Build {section_id: heading_level} for every ATX heading in *data*.

    Section ids mirror the doc site slugs: whitespace becomes '-', characters
    other than CJK/alphanumerics/'-'/'_' are removed, the result is
    lowercased, and duplicates get a numeric suffix via generate_unique_name.
    """
    specialReg = r'[^\u4E00-\u9FA5A-Za-z0-9-_]'
    sectionIdList = {}
    newSlug = {}
    # Fix: raw string — the original plain literal relied on Python keeping
    # the invalid '\s' escape verbatim (a DeprecationWarning since 3.6).
    headerReg = re.compile(r'^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)', re.MULTILINE)
    for header in re.finditer(headerReg, data):
        headerLevel = header.group(1).strip()
        headerText = deleteHeadMoreText(header.group(2).strip())
        if not headerText:
            continue
        sectionId = re.sub(r'\s', '-', headerText)
        sectionId = re.sub(specialReg, '', sectionId)
        sectionId = sectionId.lower()
        sectionId, newSlug = generate_unique_name(sectionId, newSlug)
        sectionIdList[sectionId] = len(headerLevel)
    return sectionIdList


def deleteHeadMoreText(text):
    """Strip HTML comments and tags from heading text, then decode the basic
    HTML entities, returning the cleaned heading."""
    # HTML comments such as <!--Del-->
    comment_re = r'<!--(?:-?>|[\s\S]*?(?:-->|$))'
    # Closing tags: </a>, </sup>, ...
    closing_re = r'(?<!\\)<\/[a-zA-Z][\w:-]*\s*>'
    # Opening tags, optionally with attributes: <a>, <a name="test">, <sup>
    opening_re = r'(?<!\\)<[a-zA-Z][\w-]*(?:\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*\'[^\']*\'|\s*=\s*[^\s"\'=<>`]+)?)*?\s*\/?>'
    for pattern in (comment_re, closing_re, opening_re):
        text = re.sub(pattern, '', text)
    for entity, char in (('&lt;', '<'), ('&gt;', '>'), ('&amp;', '&')):
        text = text.replace(entity, char)
    return text.strip()


def generate_unique_name(originName, slug):
    """De-duplicate a section id against previously issued ids.

    Returns (unique_name, slug) where slug tracks, per base name, the highest
    numeric suffix handed out so far (mutated in place when truthy)."""
    if not slug:
        slug = {}
    if originName not in slug:
        slug[originName] = 0
        return originName, slug
    counter = slug[originName]
    candidate = originName
    while candidate in slug:
        counter += 1
        candidate = f'{originName}-{counter}'
    slug[originName] = counter
    slug[candidate] = 0
    return candidate, slug


# 扫描相对路径断链问题
def relaLinkError(path, scanResult, resultDic):
    '''
    Run the relative-link checks (legacy entry point) over the md files
    under *path*.

    :param path: directory containing the md files
    :param scanResult: aggregate list of scan findings, appended in place
    :param resultDic: detailed scan statistics, updated in place
    :return: None
    '''
    resultList = []
    mdFiles = utils.getMdFile(path)
    # Each checker appends its findings to resultList.
    for check in (crossDirLink, crossDocLink, internalDocLink, linkStyleError):
        check(mdFiles, resultList)
    if not resultList:
        return
    for result in resultList:
        scanResult.append(result)
    resultDic["scanResult"] = scanResult
    resultDic["pass_case_num"] -= 1
    resultDic["fail_case_num"] += 1
    resultDic["fault_summary"]["link_error"] = len(resultList)


# Cross-directory link scan
def crossDirLink(mdFiles, resultList):
    '''
    Scan markdown files for broken relative links that climb out of the
    current directory ("../"-style paths), including links to sections.

    :param mdFiles: list of markdown file paths to scan
    :param resultList: accumulator; one result entry is appended per broken link
    :return: None
    '''

    for mdPath in mdFiles:
        mdPathList = mdPath.split('/')
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            # Skip files carrying the no-check marker
            if utils.if_no_check(mdPath):
                continue
            else:
                # Open the document containing the links
                with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
                    # Read the document content
                    data = f.read()
                    # Recognize relative paths such as
                    # "[Release-Notes](../../release-notes/Readme.md)"
                    # "[Release-Notes](./release-notes/Readme.md)"
                    pattern1 = re.compile(r'\(\.\.?/.+?\)')
                    urlArray = re.findall(pattern1, data)
                    for url in urlArray:
                        if url and '.md' in url:
                            # Strip the parentheses from the matched text
                            url = url.replace('(', '')
                            originurl = url.replace(')', '')
                            if '/docs/' in url:
                                # NOTE(review): this branch appends a plain string instead of a
                                # getResult() dict — presumably a legacy format; confirm consumers.
                                resultList.append(
                                    'ErrorLink-->' + originurl + ' : ' + 'locationDoc-->' + targetMdpath + '\n')
                            else:
                                url = originurl.split('/')
                                # Resolve the '..' segments to build the linked file's path
                                # (only up to four levels of '..' are handled here)
                                if url and url.count('..') == 1:
                                    url.pop(0)
                                    url = mdPathList[:-2] + url
                                elif url and url.count('..') == 2:
                                    del url[0:2]
                                    url = mdPathList[:-3] + url
                                elif url and url.count('..') == 3:
                                    del url[0:3]
                                    url = mdPathList[:-4] + url
                                elif url and url.count('..') == 4:
                                    del url[0:4]
                                    url = mdPathList[:-5] + url
                                elif url and url[0] == '.' and url[1] != '..':
                                    url = mdPathList[:-1] + url
                                url = '/'.join(url)
                                # When the link targets a section, check that the linked
                                # file actually contains that section
                                if '#' in url:
                                    section = url.split('#')[-1]
                                    # Drop the trailing "-<digit>" suffix of ids like 'sendrequest8-2'
                                    # while leaving 'send-request8' / 'section12312313414' untouched
                                    if len(section) < 2:
                                        pass
                                    else:
                                        if section[-1].isdigit() and section[-2] == '-':
                                            section = section[:-2]
                                    # [on('complete'|'fail')<sup>9+</sup>](#oncomplete--fail9)
                                    if '--' in section:
                                        section = section.replace('--', '')
                                    # English headings get hyphens inserted automatically;
                                    # neutralize them before comparing
                                    section = section.replace('-', ' ')
                                    sectionPath = url.split('#')[0]
                                    try:
                                        with open(sectionPath, 'r', encoding='utf-8', errors='ignore') as sectionFile:
                                            sectionFileData = sectionFile.read()
                                            # Plain-text replace targets (not regexes); entries like "\('"
                                            # keep a literal backslash to strip escaped punctuation
                                            punctuationlist = ["'|'", "' | '", '.', "\('", "'\)", "<sup>", "</sup>",
                                                               '\(', '\)', "('", "')",
                                                               '"', "(", ")", ":", "'", "：", "“", "（", "）", "+", "\<",
                                                               "\>", "<", ">",
                                                               "\@", "[", "]", "/", "~", "&lt;", "&gt;", "{", "}", "，",
                                                               "@", ",", "、"]
                                            # Link ids never contain special symbols, so clear them
                                            # from the document before comparing
                                            for punc in punctuationlist:
                                                sectionFileData = sectionFileData.replace(punc, '')
                                            sectionFileData = sectionFileData.replace("-", ' ')
                                            # If the linked file has no heading matching the
                                            # section id, report a broken link
                                            section_pattern = re.compile('# *{} *\n'.format(section))
                                            if re.search(section_pattern, sectionFileData.lower()):
                                                pass
                                            else:
                                                resultList.append(getResult('link_error', originurl, targetMdpath))
                                    except OSError as e:
                                        # Linked file could not be opened: broken link
                                        resultList.append(getResult('link_error', originurl, targetMdpath))
                                else:
                                    # No section anchor: just check that the file exists
                                    result = os.path.exists(url)
                                    if not result:
                                        resultList.append(getResult('link_error', originurl, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError as e:
            resultList.append(getResult('link_error', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))


# Cross-document link scan within the same directory
def crossDocLink(mdFiles, resultList):
    '''
    Scan markdown files for broken links that point to other documents
    without "../" (same-directory or repo-absolute paths), including
    links to sections inside the target document.

    :param mdFiles: list of markdown file paths to scan
    :param resultList: accumulator; one result entry is appended per broken link
    :return: None
    '''
    for mdPath in mdFiles:
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            # Skip files carrying the no-check marker
            if utils.if_no_check(mdPath):
                continue
            else:
                # Open the document containing the links
                with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
                    # Read the document content
                    data = f.read()
                    # Examples matched:
                    # "[获取源码](sourcecode.md)"
                    # "[Want](js-apis-featureAbility.md#Want类型说明)"
                    # "[FormExtensionContext](/zh-cn/application-dev/reference/apis/js-apis-formextensioncontext.md)"
                    pattern2 = re.compile(r'\[[\S ]+?\]\((?!\.\.|#|http).+?\)')
                    dirUrlArray = re.findall(pattern2, data)
                    for originurl in dirUrlArray:
                        # The trailing conditions exclude compound matches like
                        # '[fillStyle](# fillstyle) | &lt;color&gt;&nbsp;\|&nbsp;[CanvasGradient](ts-components-canvas-canvasgradient.md)'
                        if originurl and 'http' not in originurl and '../' not in originurl and '[]' not in originurl \
                                and '.md' in originurl and 'table' not in originurl and '](#' not in originurl and './' not in originurl:
                            url = originurl[originurl.rfind(']') + 1:]
                            url = url.replace('(', '')
                            url = url.replace(')', '')
                            url = os.path.join(os.path.dirname(mdPath), url)
                            # When the link targets a section, check that the linked
                            # file actually contains that section
                            if '#' in url:
                                section = url.split('#')[1]
                                # Drop the trailing "-<digit>" suffix of ids like 'sendrequest8-2'
                                # while leaving 'send-request8' / 'section12312313414' untouched
                                if len(section) < 2:
                                    pass
                                else:
                                    if section[-1].isdigit() and section[-2] == '-':
                                        section = section[:-2]
                                # [on('complete'|'fail')<sup>9+</sup>](#oncomplete--fail9)
                                if '--' in section:
                                    section = section.replace('--', '')
                                # English headings get hyphens inserted automatically;
                                # neutralize them before comparing
                                section = section.replace('-', ' ')
                                puremdSection = copy.deepcopy(section)
                                # [Resource&nbsp;Group&nbsp;Sub-directories](#resource-group-sub-directories)
                                # [不建议借助@StorageLink的双向同步机制实现事件通知](#不建议借助storagelink的双向同步机制实现事件通知)
                                puremdSection = puremdSection.replace('-', ' ').replace(')', '').replace('(',
                                                                                                         '').replace(
                                    '@', '')
                                sectionPath = url.split('#')[0]
                                # Check whether the linked file exists
                                result = os.path.exists(sectionPath)
                                # A missing target file is a broken link
                                if not result:
                                    resultList.append(
                                        getResult('link_error', originurl, targetMdpath))
                                else:
                                    # File exists; now check that it contains the section
                                    with open(sectionPath, 'r', encoding='utf-8', errors='ignore') as sectionFile:
                                        sectionFileData = sectionFile.read()
                                        # Plain-text replace targets (not regexes); entries like "\('"
                                        # keep a literal backslash to strip escaped punctuation
                                        punctuationlist = ["'|'", "' | '", '.', "\('", "'\)", "<sup>", "</sup>", '\(',
                                                           '\)', "('", "')",
                                                           '"', "(", ")", ":", "'", "：", "“", "（", "）", "+", "\<", "\>",
                                                           "<", ">",
                                                           "\@", "[", "]", "/", "~", "&lt;", "&gt;", "{", "}", "，", "@",
                                                           ",", "、"]
                                        # Link ids never contain special symbols, so clear them
                                        # from the document before comparing
                                        for punc in punctuationlist:
                                            sectionFileData = sectionFileData.replace(punc, '')
                                        sectionFileData = sectionFileData.replace("-", ' ')
                                        section_pattern = re.compile(
                                            '# +{0} *\n|# +{1} *\n'.format(section, puremdSection))
                                        if re.search(section_pattern, sectionFileData) or re.search(section_pattern,
                                                                                                    sectionFileData.lower()):
                                            pass
                                        elif puremdSection in sectionFileData or puremdSection in sectionFileData.lower():
                                            pass
                                        else:
                                            resultList.append(
                                                getResult('link_error', originurl, targetMdpath))
                            else:
                                # A missing target file is a broken link
                                result = os.path.exists(url)
                                if not result:
                                    resultList.append(
                                        getResult('link_error', originurl, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError as e:
            resultList.append(getResult('link_error', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))


# Intra-document link scan
def internalDocLink(mdFiles, resultList):
    '''
    Scan markdown files for "(#section)" links that point inside the same
    document and verify that a matching heading or <a name=...> anchor exists.

    :param mdFiles: list of markdown file paths to scan
    :param resultList: accumulator; one result entry is appended per broken link
    :return: None
    '''
    for mdPath in mdFiles:
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            # Skip files carrying the no-check marker
            if utils.if_no_check(mdPath):
                continue
            else:
                # Open the document containing the links
                with open(mdPath, 'r', encoding='utf-8', errors='ignore') as f:
                    # Read the document content
                    data = f.read()
                    data = data.replace(u'\xa0', u'')
                    # "[接口说明](#section1633115419401)"
                    pattern3 = re.compile(r'\[[\S ]+?\]\(#.+?\)')
                    # "# abilityAccessCtrl.createAtManager"
                    pattern4 = re.compile(r'# .+\.[\S]+')
                    internalUrlArray = re.findall(pattern3, data)
                    # Remove the link occurrences themselves from the text
                    for internalUrl in internalUrlArray:
                        data = data.replace(internalUrl, '')
                    sectionTitleArray1 = re.findall(pattern4, data)
                    sectionTitleArray2 = []
                    # NOTE(review): sectionTitleArray2 is built but never read below —
                    # confirm whether it is dead code or meant to feed the comparison.
                    for sectionTitle in sectionTitleArray1:
                        sectionTitle = sectionTitle.lower().replace('.', '')
                        sectionTitleArray2.append(sectionTitle)
                    for internalUrl in internalUrlArray:
                        # Exclude compound matches like
                        # "[CanvasGradient](ts - components - canvas - canvasgradient.md) & nbsp;\ | & nbsp;[CanvasPattern](#canvaspattern)"
                        if '.md)' not in internalUrl and '../' not in internalUrl and '.md#' not in internalUrl:
                            section = internalUrl.split('#')[-1]
                            section = section.replace(')', '').replace('(', '')
                            # Drop the trailing "-<digit>" suffix of ids like 'sendrequest8-2'
                            # while leaving 'send-request8' / 'section12312313414' untouched
                            if len(section) < 2:
                                pass
                            else:
                                if section[-1].isdigit() and section[-2] == '-':
                                    section = section[:-2]
                            # [on('complete'|'fail')<sup>9+</sup>](#oncomplete--fail9)
                            if '--' in section:
                                section = section.replace('--', '')
                            # English headings get hyphens inserted automatically;
                            # neutralize them before comparing
                            section = section.replace('-', ' ')
                            puremdSection = copy.deepcopy(section)
                            # [Resource&nbsp;Group&nbsp;Sub-directories](#resource-group-sub-directories)
                            # [不建议借助@StorageLink的双向同步机制实现事件通知](#不建议借助storagelink的双向同步机制实现事件通知)
                            puremdSection = puremdSection.replace('-', ' ').replace(')', '').replace('(', '').replace(
                                '@', '')
                            if section == '*\\url' and puremdSection == '*\\url':
                                pass
                            else:
                                linkpattern = re.compile(
                                    r'name[ ]{{0,1}}=[ ]{{0,1}}{0}|#[ ]{{0,5}}{0}[ ]*\n|# {1}[ ]*\n|# \\\*{0}[ ]*\n|name[ ]{{0,1}}=[ ]{{0,1}}\"{0}\"'.format(
                                        section, puremdSection))
                                # A heading like 'InputMethodEngine<a name="InputMethodEngine"></a>'
                                # may be linked as [InputMethodEngine](#InputMethodEngine) in either case
                                linkpattern2 = re.compile(
                                    r'name[ ]{{0,1}}=[ ]{{0,1}}{0}|name[ ]{{0,1}}=[ ]{{0,1}}\"{0}\"'.format(section,
                                                                                                            puremdSection))
                                targetList1 = re.findall(linkpattern2, data)
                                targetList2 = re.findall(linkpattern, data.lower())
                                # Plain-text replace targets (not regexes); entries like "\('"
                                # keep a literal backslash to strip escaped punctuation
                                punctuationlist = ["'|'", "' | '", '.', "\('", "'\)", "<sup>", "</sup>", '\(', '\)',
                                                   "('", "')",
                                                   '"', "(", ")", ":", "'", "：", "“", "（", "）", "+", "\<", "\>", "<",
                                                   ">",
                                                   "\@", "[", "]", "/", "~", "&lt;", "&gt;", "{", "}", "，", "@", ",",
                                                   "、"]
                                for punc in punctuationlist:
                                    data = data.replace(punc, '')
                                # English headings get hyphens inserted automatically;
                                # neutralize them before comparing
                                data = data.replace('-', ' ')
                                targetList4 = re.findall(linkpattern, data.lower())
                                targetList = targetList1 + targetList2 + targetList4
                                # No matching anchor or heading anywhere: broken link
                                if not targetList:
                                    resultList.append(
                                        getResult('link_error', internalUrl, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError as e:
            resultList.append(getResult('link_error', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))


# Malformed-link scan for a single markdown file
def mdLinkStyleError(mdFiles, resultList, targetMdpath):
    '''
    Scan one markdown file for malformed link / image syntax.

    :param mdFiles: path of the single md file to scan (a string, despite the plural name)
    :param resultList: accumulator; one result entry is appended per issue
    :param targetMdpath: path recorded in the result entries
    :return: None
    '''
    # Open the document containing the links
    with open(mdFiles, 'r', encoding='utf-8', errors='ignore') as f:
        # Read the document content
        data = f.read()
        data_without_code = utils.get_data_without_code(data)
        # Leftover conversion error text, e.g.:
        # "[ERROR: Invalid link: zh - cn_topic_0000001078401780.xml#xref5788645142713,link:#table1156812588320]"
        # NOTE(review): the pattern has no space after "ERROR:" while the sample above
        # does — confirm which form the converter actually emits.
        pattern5 = re.compile(r'\[ERROR:Invalid link:')
        errorLinkArray = re.findall(pattern5, data_without_code)
        for errorlink in errorLinkArray:
            resultList.append(
                getResult('mdLinkStyleError', f"链接格式错误：{errorlink}", targetMdpath))
        # Ordinary links, e.g. [if/else：条件渲染](../ui/rendering-control/arkts-rendering-control-ifelse.md)
        pattern2 = re.compile(r'(?<!\\)(?<!!)\[.*?\]\(\s*[^\)]*?[\n\)]')
        links = re.findall(pattern2, data_without_code)
        for link in links:
            check_result = check_link_style(link)
            if not check_result:
                resultList.append(getResult('md_LinkStyle_Error', f"链接格式错误：{link}", targetMdpath))
        # Image links: captures (alt text, uri, optional title)
        pattern = r'(?<![\\])!\[((?:\[(?:\\.|[^\n\[\]\\])*\]|\\.|`[^\n`]*`|[^\n\[\]\\`])*?)\]\(([^\n]*?\.[a-zA-Z]+)(?:\s+["\'](.*?)["\'])?\s*\)'
        # Extract all matches
        links3 = re.findall(pattern, data_without_code)
        for title, uri, name in links3:
            link = ''
            # Reconstruct the original link for the error message; the optional
            # title part is left out of the style check itself
            if name != "":
                link = f'![{title}]({uri} \"{name}\")'
            else:
                link = f'![{title}]({uri})'
            check_link = f'![{title}]({uri})'
            check_result = check_link_style(check_link)
            if not check_result:
                resultList.append(getResult('md_LinkStyle_Error', f"图片链接格式错误：{link}", targetMdpath))
        # Image names containing parentheses, e.g.
        # ![](./figures/resolve_sliding_white_blocks_cachedCount(5).gif)
        pattern2 = r'!\[.*?\]\([^\)]*\n'
        links4 = re.findall(pattern2, data_without_code)
        for link in links4:
            check_result = check_link_style(link)
            if not check_result:
                resultList.append(getResult('md_LinkStyle_Error', f"图片链接格式错误：{link}", targetMdpath))


# Malformed-link scan
def linkStyleError(mdFiles, resultList):
    """Scan markdown files for malformed link syntax.

    :param mdFiles: list of markdown file paths to scan
    :param resultList: accumulator; one result entry is appended per issue
    :return: None
    """
    # Both patterns are loop-invariant, so compile them once up front.
    error_marker_re = re.compile(r'\[ERROR:Invalid link:')
    image_link_re = re.compile(r'!\[.+?\]\([^\.]+\.[^\.]+?\)|!\[ *\]\(.+?\)|!\[.+?\]\( *\)')
    for md_path in mdFiles:
        target = os.path.relpath(md_path, ohos_root_path)
        try:
            # Honor the per-file no-check marker.
            if utils.if_no_check(md_path):
                continue
            with open(md_path, 'r', encoding='utf-8', errors='ignore') as handle:
                content = utils.get_data_without_code(handle.read())
            # Leftover conversion error text, e.g.
            # "[ERROR: Invalid link: zh - cn_topic_....xml#...,link:#table...]"
            for marker in re.findall(error_marker_re, content):
                resultList.append(getResult('link_error', marker, target))
            # Image links with a missing alt text, target, or extension.
            for candidate in re.findall(image_link_re, content):
                if not check_link_style(candidate):
                    resultList.append(getResult('link_error', candidate, target))
        except FileNotFoundError as err:
            print(err)
        except UnicodeDecodeError as err:
            resultList.append(getResult('link_error', '该文档编码有问题，请使用utf-8进行编码', target))


# Check whether text conforms to markdown link format
def check_link_style(link):
    '''
    Decide whether a markdown link / image reference is well formed.

    :param link: candidate link text, e.g. "[text](path.md)" or "![alt](img.png)"
    :return: True if the link looks well formed, False otherwise
    '''
    link = link.replace(" ", "").replace('\n', '')
    # Escaped brackets mean this is not a real link; treat it as acceptable.
    if '\\]' in link or '\\(' in link or '\\)' in link:
        return True
    # Guard: without a '](' separator the text is not parseable as a link
    # (the original code raised IndexError here).
    parts = link.split('](')
    if len(parts) < 2:
        return False
    # [意图调用]()  -> empty target
    # [](../reference/apis-ability-kit/js-apis-app-ability-uiability.md) -> empty text
    if parts[0].strip() == '[' or parts[1].strip() == ')':
        return False
    elif not link.endswith(')'):
        return False
    # [意图调用](../reference/apis-ability-kit/js-apis-app-ability-uiability.md)
    # [意图调用](js-apis-app-ability-uiability.md)
    elif link.endswith('.md)') and '#' not in parts[-1]:
        return True
    # [意图调用](../reference/apis-ability-kit/js-apis-app-ability-uiability.md#后台通信能力)
    # [意图调用](js-apis-app-ability-uiability.md#后台通信能力)
    elif '.md#' in link and not link.endswith('.md)'):
        return True
    # [意图调用](#后台通信能力)
    elif '(#' in link and not link.endswith('.md)'):
        return True
    # Images: require a real image extension in the target.
    # Bug fix: the original or-chain contained bare truthy strings
    # ("... or '.jpeg)' or '.svg)' in link in link ..."), which made the whole
    # condition always True for any text containing '!['.
    elif '![' in link and any(ext in link for ext in (
            '.png)', '.jpg)', '.gif)', '.jpeg)', '.svg)',
            '.PNG)', '.JPG)', '.GIF)', '.JPEG)', '.SVG)')):
        return True
    # Web links
    elif '(http://' in link or '(https://' in link:
        return True
    else:
        return False


# Scan for broken http/https links
def httpError(path, scanResult, resultDic):
    '''
    Probe every http(s) link found in the markdown files under *path* and
    record links that are unreachable or answer 403/404.

    :param path: file path to scan
    :param scanResult: aggregated scan-result list
    :param resultDic: detailed result statistics, updated in place
    :return: None
    '''
    resultList = []
    mdFiles = utils.getMdFile(path)
    # Loop-invariant pattern, compiled once.
    pattern = re.compile(r'\(http[s]?:.+?\)')
    for mdPath in mdFiles:
        # Computed before the try so every except handler can reference it
        # (previously it was assigned inside the with-block, risking a NameError).
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            # Skip files carrying the no-check marker
            if utils.if_no_check(mdPath):
                continue
            with open(mdPath, 'r', encoding='utf-8', errors='ignore') as openMd:
                data = openMd.read()
                urlArray = re.findall(pattern, data)
                for url in urlArray:
                    if not url:
                        continue
                    # Skip archives and hosts known to block/throttle probes.
                    # url still carries the surrounding parentheses here, so the
                    # extension sits at [-4:-1] / [-5:-1].
                    if url[-4:-1] == ".gz" or url[-5:-1] == ".tar" \
                            or 'github.com' in url or 'gn.googlesource.com' in url \
                            or 'arkui-x' in url:
                        continue
                    # Strip the surrounding parentheses.
                    url = url[1:-1]
                    try:
                        # NOTE(review): verify=False disables TLS certificate checks;
                        # acceptable for a liveness probe but worth confirming.
                        r = requests.get(url, verify=False, timeout=30)
                        if r.status_code in (404, 403):
                            resultList.append(getResult('http_error', url, targetMdpath))
                    except UnicodeEncodeError:
                        # URLs requests cannot encode are skipped, as before.
                        pass
                    except Exception:
                        # Any other failure (timeout, DNS, connection) counts as broken.
                        resultList.append(getResult('http_error', url, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError as e:
            resultList.append(getResult('http_error', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["http_error"] = len(resultList)


# Scan English documents for Chinese characters
def findCN(path, scanResult, resultDic):
    '''
    Report every Chinese character found in documents under the /en/ tree.

    :param path: file path to scan
    :param scanResult: aggregated scan-result list
    :param resultDic: detailed result statistics, updated in place
    :return: None
    '''
    resultList = []
    mdFiles = utils.getMdFile(path)
    # CJK Unified Ideographs range; one findall replaces the original
    # per-character Python loop with identical results (same chars, same order).
    cn_pattern = re.compile(u'[\u4e00-\u9fff]')
    for mdPath in mdFiles:
        # Computed before the try so the except handlers can always reference it
        # (previously it was assigned inside the with-block, risking a NameError).
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            if check_file_content_is_empty(mdPath):
                continue
            if 'zh-cn' in mdPath or utils.if_no_check(mdPath):
                continue
            # Only documents under the English tree are checked
            if '/en/' in mdPath:
                # Read the md file content
                with open(mdPath, 'r', encoding='utf-8', errors='ignore') as openMd:
                    data = openMd.read()
                    # One result entry per Chinese character, as before
                    for content in cn_pattern.findall(data):
                        resultList.append(getResult('Chinese_in_English', content, targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError as e:
            resultList.append(getResult('Chinese_in_English', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["Chinese_in_English"] = len(resultList)


# Check that a document's level-1 heading follows the writing guideline
def checkTitle(path, scanResult, resultDic):
    '''
    Verify that each application-dev markdown file has exactly one non-empty
    level-1 heading, placed on the first line, and (for zh-cn files) no
    heading deeper than level 3. Empty documents are also reported.

    :param path: file path to scan
    :param scanResult: aggregated scan-result list
    :param resultDic: detailed result statistics, updated in place
    :return: None
    '''
    resultList = []
    mdFiles = utils.getMdFile(path)
    for mdPath in mdFiles:
        targetMdpath = os.path.relpath(mdPath, ohos_root_path)
        try:
            if check_file_content_is_empty(mdPath):
                resultList.append(getResult('文档为空', '不可提交空文档', targetMdpath))
                continue
            # Number of non-empty level-1 headings seen in this file
            count = 0
            if utils.if_no_check(mdPath):
                continue
            if '/application-dev/' in mdPath:
                with open(mdPath, 'r', encoding='utf-8', errors='ignore') as openMd:
                    lines = openMd.readlines()
                    first_line = check_firstline(lines)
                with open(mdPath, 'r', encoding='utf-8', errors='ignore') as sectionFile:
                    mdContent = sectionFile.read()
                    data = mdContent

                if data:
                    # Strip code blocks so code comments are not mistaken for headings
                    data = utils.get_data_without_code(data)
                    specialReg = r'[^\u4E00-\u9FA5A-Za-z0-9-_]'
                    sectionIdList = {}
                    newSlug = {}
                    headerReg = re.compile('^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)', re.MULTILINE)
                    headerRegList = re.finditer(headerReg, data)
                    headerList = list(headerRegList)
                    if headerList:
                        for header in headerList:
                            headerLevel = header.group(1).strip()
                            if len(headerLevel) == 1 and header.group(2).strip() != '':
                                count += 1
                            if len(headerLevel) > 3 and '/zh-cn/application-dev/' in mdPath:
                                resultList.append(getResult('文档标题层级大于3', header.group().strip(), targetMdpath))
                            headerText = header.group(2).strip()
                            headerText = deleteHeadMoreText(headerText)
                            if not headerText:
                                continue
                            # Build the slug-style section id from the heading text.
                            # NOTE(review): sectionIdList is filled but not read afterwards —
                            # confirm whether it is reserved for a follow-up check.
                            sectionId = re.sub(r'\s', '-', headerText)
                            sectionId = re.sub(specialReg, '', sectionId)
                            sectionId = sectionId.lower()
                            sectionId, curSlug = generate_unique_name(sectionId, newSlug)
                            newSlug = curSlug
                            sectionIdList[sectionId] = len(headerLevel)
                    if count == 0:
                        resultList.append(
                            getResult('文档一级标题错误', '文档缺少一级标题或一级标题内容为空', targetMdpath))
                    if count == 1 and not first_line:
                        resultList.append(getResult('文档一级标题错误', '一级标题没有在文档第一行', targetMdpath))
                    if count > 1:
                        resultList.append(
                            getResult('文档一级标题错误', '文档存在多个一级标题，一个文档只能有且只有一个一级标题',
                                      targetMdpath))
        except FileNotFoundError as e:
            print(e)
        except UnicodeDecodeError as e:
            resultList.append(getResult('Chinese_in_English', '该文档编码有问题，请使用utf-8进行编码', targetMdpath))
    if resultList:
        for result in resultList:
            scanResult.append(result)
        resultDic["scanResult"] = scanResult
        resultDic["pass_case_num"] -= 1
        resultDic["fail_case_num"] += 1
        resultDic["fault_summary"]["文档一级标题错误"] = len(resultList)


# Check whether the first line is a level-1 heading
def check_firstline(lines):
    '''
    :param lines: list of lines read from the file
    :return: True if the first line is a level-1 heading ("# ..."), False
             otherwise. Also returns False for an empty list, which
             previously raised IndexError. Callers only test truthiness,
             so returning a bool (instead of a Match object) is compatible.
    '''
    if not lines:
        return False
    # A level-1 heading is "# " followed by anything (including nothing).
    return bool(re.match('^# .*?', lines[0]))


# Check whether a document is empty
def check_file_content_is_empty(mdPath):
    """Return True when the file at *mdPath* contains only whitespace
    (or nothing at all), False otherwise."""
    with open(mdPath, 'r', encoding='utf-8', errors='ignore') as handle:
        return not handle.read().strip()


# Find unescaped angle brackets
def get_untranfered_angle_brackets(path, scanResult, resultDic):
    """Scan markdown files under *path* for <tag>-style text whose angle
    brackets are not backslash-escaped and record every occurrence.

    :param path: directory containing the md files to scan
    :param scanResult: aggregated scan-result list
    :param resultDic: detailed result statistics, updated in place
    :return: None
    """
    findings = []
    bracket_re = re.compile(r'(?<!\\)\<[a-zA-Z]+(?<!\\)\>')
    for md_file in utils.getMdFile(path):
        target = os.path.relpath(md_file, ohos_root_path)
        if check_file_content_is_empty(md_file):
            continue
        raw = utils.get_file_data(md_file)
        # Drop code blocks, then the whitelisted markup that needs no escaping.
        cleaned = utils.get_data_without_code(raw)
        cleaned = replace_nocheck_contents(cleaned)
        for hit in re.findall(bracket_re, cleaned):
            findings.append(getResult('尖括号未转义', hit, target))
    if not findings:
        return
    scanResult.extend(findings)
    resultDic["scanResult"] = scanResult
    resultDic["pass_case_num"] -= 1
    resultDic["fail_case_num"] += 1
    resultDic["fault_summary"]["尖括号未转义"] = len(findings)


# Remove content that should not be scanned (for the unescaped-bracket check)
def replace_nocheck_contents(data):
    """Strip markup that legitimately contains angle brackets so the
    unescaped-bracket scan does not flag it.

    :param data: the file text already read into memory
    :return: the text with the whitelisted constructs removed
    """
    # Applied sequentially, each pattern on the output of the previous one.
    removal_patterns = (
        '<!--.*?(-->)',            # HTML comments
        '<[ /]*br[ /]*?>',         # line breaks
        '<a.*?a>',                 # whole anchor elements
        '<[ /]*sub[ /]*?>',
        '<[ /]*sup[ /]*?>',
        '<[ /]*b[ /]*?>',
        '<[ /]*li[ /]*?>',
        '<[ /]*option.*?>',
        '<[ /]*hr[ /]*?>',
        '<[ /]*ul[ /]*?>',
        '\'.*?\'',                 # single-quoted spans
        '<[ /]*strong[ /]*?>',
        '`.*?`',                   # inline code
        '<[ /]*p[ /]*?>',
        '<[ /]*text[ /]*?>',
    )
    for regexp in removal_patterns:
        data = re.sub(regexp, '', data)
    return data


def getResult(errorType, errorInfo, file):
    """Build a single scan-result record.

    :param errorType: error category label
    :param errorInfo: detailed error message
    :param file: path of the offending file
    :return: dict with 'error_type', 'error_info' and 'file' keys
    """
    return {
        'error_type': errorType,
        'error_info': errorInfo,
        'file': file,
    }


def get_lines_number(match, lines):
    '''
    Map a regex match over ''.join(lines) back to a 1-based line number.

    :param match: re.Match object obtained from the joined text
    :param lines: the individual lines the text was joined from
    :return: 1-based number of the line containing the match start
             (1 when no line covers the position)
    '''
    start_pos = match.start()
    # Running total of characters seen so far; replaces the original quadratic
    # ''.join(lines[:j]) computed on every iteration with an O(n) scan.
    line_number = 1
    consumed = 0
    for j, line in enumerate(lines, 1):
        consumed += len(line)
        if start_pos < consumed:
            line_number = j
            break
    return line_number
