import re
import time
import pandas as pd
import requests
from lxml import etree

# Module-level accumulator: parse_article() appends one dict per crawled
# article; save_article() flushes the whole list to an Excel file at the end.
article_list = []


def get_article(url):
    """Fetch the raw HTML of a WeChat (mp.weixin.qq.com) article page.

    Parameters
    ----------
    url : str
        Full article URL.

    Returns
    -------
    str
        The decoded response body.

    Raises
    ------
    requests.Timeout
        If the server does not respond within 30 seconds.
    requests.HTTPError
        If the server answers with a 4xx/5xx status (e.g. an anti-crawler
        block page) — better to fail fast than to parse an error page.
    """
    # Browser-like headers to reduce the chance of being served a block page.
    headers = {
        "authority": "mp.weixin.qq.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "if-modified-since": "Sat, 24 Aug 2024 11:03:09 +0800",
        "sec-ch-ua": "^\\^",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "^\\^Windows^^",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36 Edg/100.0.1185.36"
    }
    # Bug fix: the original had no timeout, so one dead connection could
    # hang the entire crawl indefinitely.
    response = requests.get(url, headers=headers, timeout=30)
    # Surface HTTP errors immediately; the caller's try/except handles them.
    response.raise_for_status()
    return response.text


def parse_article(article, count, url):
    """Extract title, author, publish time, body text and image URLs from
    one article's HTML and append the result to the global ``article_list``.

    Parameters
    ----------
    article : str
        Raw HTML of the article page.
    count : int
        Zero-based position in the crawl loop (only used for progress output).
    url : str
        The article URL, stored alongside the parsed fields.
    """
    global article_list

    def clean(nodes):
        # Join the extracted text nodes, drop embedded newlines, trim edges.
        return ' '.join(nodes).replace('\n', '').strip()

    html = etree.HTML(article)
    title = clean(html.xpath("//h1[@id='activity-name']//text()"))
    author = clean(html.xpath('//*[@id="js_name"]/text()'))

    # The publish time lives in an inline script: createTime = 'YYYY-MM-DD HH:MM'
    pattern = r"createTime\s*=\s*'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2})'"
    match = re.search(pattern, article)
    # Bug fix: the original called match.group(1) unconditionally and raised
    # AttributeError on pages without a createTime script (e.g. deleted posts).
    times = match.group(1) if match else ""

    # Body text is spread across <section> and <p> nodes; keep both sets.
    content_all = html.xpath("//section//text()") + html.xpath("//p//text()")
    contents = "  ".join(content_all)

    # WeChat lazy-loads images, so the real URL is in @data-src, not @src.
    images = html.xpath("//img/@data-src")
    images_list = [{"图片{}".format(index + 1): i} for index, i in enumerate(images)]

    article_data = {
        "标题": title,
        "链接": url,
        "作者": author,
        "发布时间": times,
        "内容": contents,
        "图片": images_list
    }
    print(f"{count + 1}. 正在保存{article_data}")
    article_list.append(article_data)


def save_article():
    """Flatten the global ``article_list`` into a DataFrame and write it to
    '文章Data4.xlsx' (one row per article, one column per image slot).
    """
    global article_list

    # Widest image list determines how many 图片N columns we need.
    # Bug fix: the original used max() over a possibly-empty sequence, which
    # raises ValueError when the crawl failed before parsing anything —
    # exactly the situation in which the finally-block calls this function.
    max_images = max((len(a['图片']) for a in article_list), default=0)

    # Fixed columns first, then 图片1..图片N.
    columns = (['标题', '链接', '作者', '发布时间', '内容']
               + ['图片{}'.format(i) for i in range(1, max_images + 1)])

    formatted_data = []
    for item in article_list:
        row = [item['标题'], item['链接'], item['作者'], item['发布时间'], item['内容']]
        # Each entry in item['图片'] is a one-key dict {"图片N": url}; keep the url.
        for pic in item['图片']:
            row.extend(pic.values())
        # Pad short rows with None so every row matches the column count.
        row.extend([None] * (len(columns) - len(row)))
        formatted_data.append(row)

    df = pd.DataFrame(formatted_data, columns=columns)

    with pd.ExcelWriter('文章Data4.xlsx') as writer:
        df.to_excel(writer, index=False, sheet_name='Sheet1')

    print("Data has been written to Excel.")


if __name__ == '__main__':
    article = ""
    # One URL per line. Bug fix: the original split on '\n' without filtering,
    # so a trailing newline produced an empty string that was then passed to
    # requests.get(""). Strip each line and drop blanks; also pin the encoding.
    with open("merged_urls.txt", "r", encoding="utf-8") as f:
        urls = [line.strip() for line in f if line.strip()]

    try:
        # NOTE(review): starts at URL #500 — presumably resuming a previous
        # run; confirm the offset is still wanted before re-running.
        for count, link in enumerate(urls[500:]):
            # Every 10 requests, pause to reduce the risk of being rate-limited.
            if count % 10 == 0 and count != 0:
                print(f"休眠15秒，防止反爬")
                time.sleep(15)
            article = get_article(link)
            parse_article(article, count, link)
            time.sleep(1)  # polite per-request delay
    except Exception as e:
        # Best-effort crawl: report the failure (with the last fetched HTML
        # for debugging) and fall through to save whatever was collected.
        print(e, article)
    finally:
        # Always persist partial results, even after an error or Ctrl-C-free crash.
        save_article()