import re
import utils
import os
import time

# Base directory that reported file paths are made relative to (see
# os.path.relpath in relaLinkErrorNew). Empty string means paths are
# resolved relative to the current working directory.
# NOTE(review): presumably the OHOS docs repo root — confirm with callers.
ohos_root_path = ""


def relaLinkErrorNew(path):
    """Scan markdown files under *path* for malformed or broken links.

    For every markdown file (except those skipped by utils.if_no_check) this
    checks each inline link ``[text](href)`` and records an error dict for:
    malformed links, links with multiple '#', anchors that do not exist in
    the target document, and anchors that point at a level-1 heading.

    :param path: path handed to utils.getMdFile to enumerate markdown files
    :return: list of error dicts produced by getResult (also printed)
    """
    start = time.time_ns()
    resultList = []
    mdFiles = utils.getMdFile(path)
    # Hoisted out of the loop: the pattern is invariant across files.
    linkReg = re.compile(
        r'(?<![!\\])\[((?:\[(?:\\.|[^\n\[\]\\])*\]|\\.|`[^\n`]*`|[^\n\[\]\\`])*?)\]\(([^\n]*?)\)')
    for mdFile in mdFiles:
        targetMdpath = os.path.relpath(mdFile, ohos_root_path)
        # Skip files marked as not-to-be-checked.
        if utils.if_no_check(mdFile):
            continue
        with open(mdFile, 'r', encoding='utf-8', errors='ignore') as f:
            data = f.read()
        if not data:
            # BUG FIX: the original used `break` here, which aborted the scan
            # of ALL remaining files as soon as one empty file was seen; an
            # empty file should simply be skipped.
            continue
        # Strip code blocks so code comments are not mistaken for headings.
        data = utils.get_data_without_code(data)
        idTotal = getMdIdSet(mdFile)
        idTotalSet = idTotal.keys()
        for itemUrl in linkReg.finditer(data):
            text = itemUrl.group(1)
            href = itemUrl.group(2).strip()
            originUrl = itemUrl.group()
            # Ignore empty link text and absolute http(s) URLs.
            if not text.strip() or (href and href.startswith(('http://', 'https://'))):
                continue
            _, ext = os.path.splitext(href)
            # A non-anchor link must point at a file with an extension.
            if not href.startswith('#') and not ext:
                resultList.append(getResult('链接不规范', originUrl, targetMdpath))
                continue
            if href.count('#') > 1:
                resultList.append(getResult('链接不规范', originUrl, targetMdpath))
                continue
            if href.startswith('#'):
                # In-document anchor: it must exist and must not be an H1.
                sectionId = href[1:]
                if sectionId not in idTotalSet:
                    resultList.append(getResult('link_error', originUrl, targetMdpath))
                    continue
                if idTotal.get(sectionId, -1) == 1:
                    resultList.append(getResult('不可链接到一级标题', originUrl, targetMdpath))
                    continue
            elif '.md' in href:
                error_Type = dealRelativeLink(href, mdFile)
                if error_Type == 1:
                    resultList.append(getResult('link_error', originUrl, targetMdpath))
                if error_Type == 2:
                    resultList.append(getResult('不可链接到一级标题', originUrl, targetMdpath))
    print(resultList)
    end = time.time_ns()
    print("用时：  " + str((end - start) / 1_000_000))
    return resultList


def dealRelativeLink(url, curFilePath):
    """Validate a relative ``.md`` link found inside *curFilePath*.

    :param url: relative link target, possibly with a ``#anchor`` suffix
    :param curFilePath: path of the markdown file containing the link
    :return: 0 if the link is valid, 1 if the target file or anchor is
             missing, 2 if the anchor points at a level-1 heading
    """
    fileDirName = os.path.dirname(curFilePath)
    # Normalize to forward slashes so '#' splitting is platform-independent.
    targetFullPath = os.path.normpath(os.path.join(fileDirName, url)).replace('\\', '/')
    anchorMdPath = targetFullPath.split('#')[0]

    if not os.path.exists(anchorMdPath):
        return 1

    if '#' in url:
        sectionId = targetFullPath.split('#')[-1]
        targetIdTotal = getMdIdSet(anchorMdPath)
        # Membership test on the dict directly; the previous `.keys()` view
        # and the redundant re-check after `not in` were unnecessary.
        if sectionId not in targetIdTotal:
            return 1
        if targetIdTotal.get(sectionId, -1) == 1:
            return 2

    return 0


def getMdIdSet(filePath):
    """Read *filePath* and return its heading-anchor map (anchor id -> level)."""
    with open(filePath, 'r', encoding='utf-8') as sectionFile:
        content = sectionFile.read()
    # Empty file: no headings, no anchors.
    if not content:
        return {}
    return getSectionIdList(content)


def getSectionIdList(data):
    """Extract heading anchors from markdown text.

    :param data: markdown document content
    :return: dict mapping generated anchor id -> heading level (1-6)
    """
    # Characters disallowed in anchor ids (anything not CJK/alnum/-/_).
    specialReg = r'[^\u4E00-\u9FA5A-Za-z0-9-_]'
    sectionIdList = {}
    newSlug = {}
    # FIX: raw string — the original plain literal contained '\s', an invalid
    # escape sequence (SyntaxWarning on Python 3.12+).
    headerReg = re.compile(r'^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)', re.MULTILINE)
    for header in headerReg.finditer(data):
        headerLevel = header.group(1).strip()
        headerText = deleteHeadMoreText(header.group(2).strip())
        if not headerText:
            continue
        # Slugify: whitespace -> '-', drop special characters, lowercase.
        sectionId = re.sub(r'\s', '-', headerText)
        sectionId = re.sub(specialReg, '', sectionId).lower()
        # De-duplicate repeated heading slugs ('foo', 'foo-1', 'foo-2', ...).
        sectionId, newSlug = generate_unique_name(sectionId, newSlug)
        sectionIdList[sectionId] = len(headerLevel)
    return sectionIdList


def deleteHeadMoreText(text):
    """Strip HTML-like markup from a heading and unescape basic entities."""
    # HTML comments, e.g. <!--Del-->
    commentPattern = r'<!--(?:-?>|[\s\S]*?(?:-->|$))'
    # Closing tags such as </a>, </sup>
    closeTagPattern = r'(?<!\\)<\/[a-zA-Z][\w:-]*\s*>'
    # Opening tags, optionally with attributes: <a>, <a name="test">, <sup>
    openTagPattern = r'(?<!\\)<[a-zA-Z][\w-]*(?:\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*\'[^\']*\'|\s*=\s*[^\s"\'=<>`]+)?)*?\s*\/?>'
    cleaned = text
    # Order matters: comments first, then closing tags, then opening tags.
    for pattern in (commentPattern, closeTagPattern, openTagPattern):
        cleaned = re.sub(pattern, '', cleaned)
    # Unescape entities in the same order as the original implementation.
    for entity, char in (('&lt;', '<'), ('&gt;', '>'), ('&amp;', '&')):
        cleaned = cleaned.replace(entity, char)
    return cleaned.strip()


def generate_unique_name(originName, slug):
    """Return a slug unique within *slug*, updating the bookkeeping map.

    *slug* maps base names to the highest numeric suffix handed out so far
    (and claimed names to 0). Collisions yield 'name-1', 'name-2', ...
    The map is mutated in place and also returned.
    """
    if not slug:
        slug = {}
    candidate = originName
    suffix = 0
    if candidate in slug:
        # Resume from the last suffix used for this base name.
        suffix = slug[candidate]
        while candidate in slug:
            suffix += 1
            candidate = f'{originName}-{suffix}'
    # Record the highest suffix for the base name and claim the new name.
    slug[originName] = suffix
    slug[candidate] = 0
    return candidate, slug


def getResult(errorType, errorInfo, file):
    """Package one finding as an error dict.

    :param errorType: error category label
    :param errorInfo: detailed error information (the offending link text)
    :param file: path of the file containing the error
    :return: error dict with keys 'error_type', 'error_info', 'file'
    """
    return {
        'error_type': errorType,
        'error_info': errorInfo,
        'file': file,
    }


if __name__ == "__main__":
    # 传入txt文件路径
    # path = sys.argv[1]
    path = 'D:\project\personal_project\person_check\\access-control\\result\master误报.txt'
    relaLinkErrorNew(path)
