import json
import requests
import datetime
from time import sleep
import pandas as pd
import os
import openpyxl

def main(fakeid=None, token=None):
    """Scrape article metadata (title, publish time, link) from a WeChat
    Official Account via the mp.weixin.qq.com publish-list API, then save
    everything to 'Article_Summary.xlsx'.

    Parameters
    ----------
    fakeid : str, optional
        The target account's fakeid. Defaults to the module-level
        ``fakeid`` set in the ``__main__`` block.
    token : str, optional
        The logged-in session token. Defaults to the module-level
        ``token`` set in the ``__main__`` block.
    """
    # Backward-compatible fallback: main() with no args keeps reading the
    # module-level credentials, exactly as the original script did.
    if fakeid is None:
        fakeid = globals()['fakeid']
    if token is None:
        token = globals()['token']

    # Request headers are loop-invariant — build them once, not per page.
    # cookie/referer must be copied from your own logged-in browser session.
    headers = {
        'authority': 'mp.weixin.qq.com',
        'cookie': 'appmsglist_action_3902758705=card; RK=ihtMfuIuVC; ptcz=ee765ec5390ad5f4d447dc546c92cb547efb458e15e917a35e7a8a638a70589f; ua_id=lhgbVEdR7eoAM55XAAAAAEXtzR4avlonyY178Rrsu3s=; wxuin=41949869842877; mm_lang=zh_CN; _clck=3902758705|1|fuj|0; uuid=a359f55191e0a3719033327e87f26e07; rand_info=CAESILhCHa2hMwdb/t6+O8jpsskTmD6+hJy+dlLJ3LIIhg6D; slave_bizuin=3902758705; data_bizuin=3902758705; bizuin=3902758705; data_ticket=yjpbqv6MRXHuqTnqtHGc6ueRPDHgBdHPxKwXGUwFQqpmgGaHQRaxGPPqYvPWNHFv; slave_sid=ZzQ1d19TTmx0OHRWbEVsMmxkYTVNbWlEamphTGk0dWlWRzlCNWRDZW42MmlnTlFTZEVoM2dyeHdwZ05ZRUIycnUxUmI4cEFUaThNVnBrMVV0Y1hHVTB4RDBaX3ZDUzRsZ2NvNnZzSVoyNVFfcVdrNVVtQ0g4eG9yNkRUUjFDVnlFVUQ0V0kwQUh4UlNLVTF5; slave_user=gh_a05fd56560c4; xid=faa90816ee61d8b27eaa78feee814201; _clsk=8mswei|1742962669611|5|1|mp.weixin.qq.com/weheat-agent/payload/record',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=77&token=1202869592&lang=zh_CN&timestamp=1742962668585',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
    }

    result = []
    seen_links = set()  # O(1) duplicate check instead of `dic not in result` (O(n) per article)

    # Upper bound on pages to fetch; the loop breaks early once the API
    # returns an empty publish_list.
    for page in range(1000):
        print(f'Page {page + 1} >>>')
        begin = page * 5  # the API pages in steps of 5 articles
        url = f"https://mp.weixin.qq.com/cgi-bin/appmsgpublish?sub=list&search_field=null&begin={begin}&count=5&query=&fakeid={fakeid}&type=101_1&free_publish_type=1&sub_action=list_ex&fingerprint=25088e2de72ef402c04fcf23048f7807&token={token}&lang=zh_CN&f=json&ajax=11"
        try:
            # timeout added: without it one hung connection stalls the run forever
            res = requests.get(url=url, headers=headers, timeout=15).json()

            # The API signals errors (expired token, bad fakeid, ...) by
            # omitting 'publish_page' from the response.
            if 'publish_page' not in res:
                print("API 可能返回错误:", res)
                continue

            page_data = json.loads(res['publish_page'])  # publish_page is a JSON string

            # Fix: the original `continue`d here, which kept requesting the
            # remaining ~1000 empty pages after the account ran out of
            # articles. An empty publish_list means we are done — break.
            if not page_data.get('publish_list'):
                print("No more articles, stopping...")
                break

            for entry in page_data['publish_list']:  # one entry per publish batch
                info = json.loads(entry['publish_info'])  # publish_info is also a JSON string
                for article in info['appmsgex']:  # all articles in this batch
                    href = article['link']
                    if href in seen_links:  # skip duplicates across pages
                        continue
                    seen_links.add(href)
                    published = datetime.datetime.fromtimestamp(article['update_time'])
                    dic = {
                        'Title': article['title'],
                        'Time': published.strftime('%Y-%m-%d %H:%M:%S'),
                        'Link': href
                    }
                    result.append(dic)
                    print(dic)

        except Exception as e:
            # Best-effort: log and move on to the next page.
            print(f'Error: {e}')

        # Throttle every iteration — including error paths, which the
        # original skipped — to avoid getting the session blocked.
        sleep(2)

    # Persist everything that was collected to Excel.
    df = pd.DataFrame(result)
    df.to_excel('Article_Summary.xlsx', index=False)
    print("爬取完成，数据已保存！")

if __name__ == '__main__':
    # Placeholder credentials — obtain real values from your own logged-in
    # mp.weixin.qq.com browser session before running.
    fakeid = 'Mz********NQ=='  # change to your own Official Account's fakeid
    token = '12******2'  # likewise, your own session token
    main()