from lxml import html


# Module-level cache of the raw HTML page, filled by parse() and read by
# every *Parse/mod*/getAddress helper below.
g_html_content = ''

# Accumulates the address links collected by getAddress(), in page order.
urlList = []

def parse(html_content):
    """Cache *html_content* in the module-level global for later XPath queries.

    Every other helper in this module re-parses this cached string rather
    than receiving the document as an argument.
    """
    global g_html_content
    g_html_content = html_content


def pageInfoParse():
    """Print the page-info line of the cached document.

    Looks up a single <p> text node via an absolute XPath (presumably a
    pagination/summary line — TODO confirm against the target page layout).

    Fix: the original unconditionally indexed ``pageinfo[0]`` and raised
    IndexError whenever the XPath matched nothing (page layout change,
    empty document). Now it simply prints nothing in that case.
    """
    tree = html.fromstring(g_html_content)
    pageInfo_role = '/html/body/div[2]/div/div[2]/div/div[4]/p/text()'
    pageinfo = tree.xpath(pageInfo_role)
    if not pageinfo:  # element missing — avoid IndexError on layout drift
        return
    print(pageinfo[0])

def modTitle():
    """Print a numbered title line for every module entry on the cached page,
    then dump each entry's content and address via the sibling helpers.
    """
    tree = html.fromstring(g_html_content)
    a_elements_xpath = '/html/body/div[2]/div/div[2]/div/div[4]/div[2]/div/div/a'
    anchors = tree.xpath(a_elements_xpath)

    # 1-based position matches XPath's div[i] indexing used by the helpers.
    for pos, anchor in enumerate(anchors, start=1):
        # Gather all descendant text and collapse runs of whitespace
        # (newlines, indentation) into single spaces.
        title = ' '.join(''.join(anchor.itertext()).split())
        print(f'{pos} {title}')
        modContent(pos)
        getAddress(pos)

def modContent(i):
    """Print the whitespace-normalized text of the *i*-th module entry.

    *i* is 1-based, matching XPath's ``div[i]`` indexing (callers pass
    ``index + 1``).

    Fixes: the original bound its XPath result to a local named
    ``modContent``, shadowing this very function, and indexed ``[0]``
    unconditionally — raising IndexError when the entry was missing.
    Now a missing entry prints nothing.
    """
    tree = html.fromstring(g_html_content)
    content_xpath = f'/html/body/div[2]/div/div[2]/div/div[4]/div[2]/div[{i}]/div[2]'
    nodes = tree.xpath(content_xpath)
    if not nodes:  # entry absent on the page — avoid IndexError
        return

    all_texts = ''.join(nodes[0].itertext())
    # Collapse newlines and repeated spaces into single spaces.
    cleaned_texts = ' '.join(all_texts.split())
    print(cleaned_texts)

def getAddress(i):
    """Print the address link text of the *i*-th entry and append it to
    the module-level ``urlList``.

    *i* is 1-based, matching XPath's ``div[i]`` indexing.

    Fix: the original indexed ``content[0]`` unconditionally and raised
    IndexError when the XPath matched nothing; a missing link is now
    skipped (nothing printed, nothing appended).
    """
    tree = html.fromstring(g_html_content)
    role = f'/html/body/div[2]/div/div[2]/div/div[4]/div[2]/div[{i}]/div[3]/span[1]/span[2]/a/text()'
    content = tree.xpath(role)
    if not content:  # link missing — avoid IndexError, keep urlList consistent
        return
    print(content[0])
    urlList.append(content[0])
