import requests, re
from lxml import html, etree

# Read the file line by line into a list
def read_config(config_file='down.txt'):
    """Return the non-blank lines of *config_file*, stripped of whitespace.

    The file is read as UTF-8; blank lines are dropped.
    """
    entries = []
    with open(config_file, 'r', encoding='utf-8') as fh:
        for raw in fh:
            text = raw.strip()
            if text:
                entries.append(text)
    return entries

def fetch_content_by_xpath(url, title_xpath, content_xpath, timeout=30):
    """Download *url* and extract a title plus plain-text content via XPath.

    Args:
        url: Page to fetch with an HTTP GET request.
        title_xpath: XPath selecting the title element; first match is used,
            missing match yields an empty title.
        content_xpath: XPath selecting the content element; first match is used.
        timeout: Request timeout in seconds.

    Returns:
        ``title + '\\n' + content`` where every HTML tag in the content has
        been replaced by a newline, or ``""`` when no content element matches
        (the title is discarded in that case, matching existing behavior).

    Raises:
        requests.HTTPError: if the response status code is not successful.
    """
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()  # Raise HTTPError for non-2xx responses

    parser = html.fromstring(response.content)

    # Title: serialize the first matching node as plain text, or "" if none.
    title_nodes = parser.xpath(title_xpath)
    if title_nodes:
        title = etree.tostring(title_nodes[0], encoding="utf-8", method='text').decode("utf-8")
    else:
        title = ""

    # Content: serialize the first matching node as HTML, or bail out.
    content_nodes = parser.xpath(content_xpath)
    if not content_nodes:
        return ""
    content = etree.tostring(content_nodes[0], encoding="utf-8", method='html').decode("utf-8")
    # Crude tag stripping: every HTML tag becomes a newline so block
    # boundaries survive as line breaks in the plain-text output.
    content = re.sub(r'<[^>]+>', '\n', content)
    return title + '\n' + content


def download_merge_content():
    """Download every URL listed in down.txt and merge the results into down_out.txt.

    Expected down.txt layout (blank lines ignored):
        line 1  - XPath for the title element
        line 2  - XPath for the content element
        line 3+ - one URL per line

    A failed download is logged and contributes an empty string to the
    merged output, so one bad URL does not abort the whole run.
    """
    conf_lines = read_config()
    title_xpath = conf_lines[0]
    content_xpath = conf_lines[1]

    content_list = []
    for url in conf_lines[2:]:
        text = ''
        try:
            text = fetch_content_by_xpath(url, title_xpath, content_xpath)
        except Exception as e:
            print(f"失败: {url}")
            print(e)
        print(text)
        content_list.append(text)

    content = ''.join(content_list)
    # Prefer GB18030 output; fall back to UTF-8 if the text cannot be
    # encoded. (The original `except e:` raised NameError at runtime
    # because `e` is unbound there, so the fallback never worked.)
    try:
        with open('down_out.txt', 'w', encoding='gb18030') as file:
            file.write(content)
    except UnicodeEncodeError:
        with open('down_out.txt', 'w', encoding='UTF-8') as file:
            file.write(content)

# Script entry point: run the full download-and-merge pipeline.
if __name__ == "__main__":
    download_merge_content()