
import requests
import time
import csv
import pdfkit
import re
import os
import json
from urllib import request
from lxml import etree
from bs4 import BeautifulSoup


def get_articles(url, cookie, token):
    """
    Fetch one page of the article list from the WeChat MP backend API.

    Args:
        url: Full ``appmsg?action=list_ex`` API url (already carrying
            begin/count/fakeid/token query parameters).
        cookie: Logged-in mp.weixin.qq.com session cookie string.
        token: Backend session token; only used to build the referer header.

    Returns:
        A list of dicts with keys ``title``, ``create_time`` and ``link``,
        or ``None`` when the response carries no ``app_msg_list`` —
        typically an expired cookie/token or WeChat's rate limit.
    """
    headers = {
        'cookie': cookie,
        'pragma': 'no-cache',
        'referer': 'https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&token={}&lang=zh_CN'.format(token),
        'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest'
    }
    response = requests.get(url, headers=headers)
    try:
        article_list = response.json()["app_msg_list"]
    except (ValueError, KeyError):
        # ValueError: body was not JSON; KeyError: no "app_msg_list" field.
        # Either way the credentials are stale or we hit the rate limit.
        return None
    articles = []
    for article in article_list:
        # Replace characters that are illegal in Windows file names, since
        # the title is later used as a directory/file name.
        title = re.sub(r"[\/\\\:\*\?\"\<\>\|\.]", "_", article["title"])
        print(title)
        create_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(article["create_time"]))
        articles.append(
            {
                "title": title,
                "create_time": create_time,
                "link": article["link"]
            }
        )
    return articles


def create_dir(dir_name):
    """
    Create directory *dir_name* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of a check-then-create pair, which
    avoids the race where the directory appears between the ``exists``
    check and ``makedirs`` and would raise ``FileExistsError``.
    """
    os.makedirs(dir_name, exist_ok=True)


def download_html(link, gzh_name, article_title):
    """
    Download one article page, localize its images, and save it as HTML
    under ``data/<gzh_name>/<article_title>/``.

    Fixes over the previous version:
    - the HTML file was written *inside* the image loop, so it was
      rewritten once per image and never written at all for articles
      without images; it is now written exactly once, after processing;
    - output directories are created once instead of once per image;
    - ``data-src`` may be missing on an ``<img>`` tag, which used to
      crash on ``None.split``; such tags are now skipped.
    """
    resp = requests.get(link)

    html = BeautifulSoup(resp.text, 'lxml')

    # Create the output folders once, up front.
    create_dir('data/%s' % gzh_name)
    create_dir('data/%s/%s' % (gzh_name, article_title))
    create_dir('data/%s/%s/images' % (gzh_name, article_title))

    try:
        div = html.find('div', attrs={'id': 'js_content'})

        img_tags = div.find_all('img')

        for img_tag in img_tags:
            img_url = img_tag.get('data-src')
            if not img_url:
                # No lazy-load url on this tag; nothing to download.
                continue
            print(img_url)
            title = img_url.split('/')[-2]
            ext = img_url.split('=')[-1]
            if re.findall(r'^https.*\d+$', img_url):
                # url ends in digits, not an extension name: assume jpeg.
                ext = 'jpeg'
            full_img_name = title + '.' + ext
            request.urlretrieve(img_url, 'data/%s/%s/images/%s' % (gzh_name, article_title, full_img_name))
            # Point the tag at the local copy so the saved HTML is offline-viewable.
            img_tag['src'] = 'images/%s' % full_img_name
    except Exception as e:
        # Best effort: an article whose images fail is still saved below.
        print(e)

    # File name is truncated to 20 chars to stay well within path limits.
    with open('data/%s/%s/%s.html' % (gzh_name, article_title, article_title[0:20]), 'w', encoding='utf-8') as f:
        f.write(str(html))


def get_spider_info(url):
    """
    Extract the session ``token`` and account ``fakeid`` from *url* and
    persist them (with the url) to ``login.json``.

    The file is opened in ``'w'`` mode: the previous ``'a'`` (append)
    mode concatenated a new JSON document on every call, so after the
    second call ``login.json`` was no longer valid JSON.

    Args:
        url: An mp.weixin.qq.com backend url containing ``token=<digits>``
            and ``fakeid=<id>&`` query parameters.

    Returns:
        Tuple ``(url, token, fakeid)``, all strings.
    """
    token = re.search(r'token=(\d+)', url).group(1)

    fakeid = re.search(r'fakeid=(.*?)&', url).group(1)

    # Overwrite rather than append so the file stays a single JSON object.
    with open('login.json', 'w', encoding='utf-8') as fp:
        content = {
            "url": url,
            "token": token,
            "fakeid": fakeid
        }
        json.dump(content, fp=fp, ensure_ascii=False, indent=4)

    return url, token, fakeid


def main():
    """
    Interactive driver: prompt for credentials, page through the account's
    article list in batches, and save the results as JSON, CSV and local
    HTML copies under ``data/<account name>/``.
    """
    # 1.1 Read the logged-in session cookie from the user.
    cookie = input("请输入cookie: ")
    # 1.2 Read the backend url (carries token and fakeid query params).
    url = input("请输入url地址: ")
    # 1.3 Ensure the top-level data folder exists.
    create_dir('data')
    # 1.4 Create a per-account folder named by the user.
    gzh_name = input('请输入微信公众号名称：')
    create_dir('data/%s' % gzh_name)
    # 1.5 Keep listening for new page offsets; the program never exits on its own.
    while True:
        url, token, fakeid = get_spider_info(url)
        # Page offset to start from (0, 20, 40, ... per the usage banner).
        page = input('请输入第几页')
        articles = []
        i = int(page)
        # Fetch articles in batches of 5; each run covers 20 requests
        # (i.e. up to 100 articles), to stay under WeChat's rate limit.
        while True:
            if i == int(page) + 20:
                break
            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?action=list_ex&begin=%s&count=5&fakeid=%s&type=9&query=&token=%s&lang=zh_CN&f=json&ajax=1" % (
            i * 5, fakeid, token)
            article = get_articles(url, cookie, token)
            if article:
                articles.extend(article)
            else:
                # None/empty means expired credentials or rate limit: stop early.
                break
            i += 1
            print('正在抓取第%s页的数据，目前总共爬取了%s篇文章' % (i, len(articles)))
            time.sleep(10)

        # Save the collected metadata as JSON.
        print('正在保存数据...')
        with open('data/%s/%s.json' % (gzh_name, gzh_name), 'w', encoding='utf-8') as fp:
            json.dump(articles, fp=fp, ensure_ascii=False, indent=4)

        # Save the same metadata as CSV (utf_8_sig so Excel detects UTF-8).
        with open('data/%s/%s.csv' % (gzh_name, gzh_name), 'w', encoding='utf_8_sig', newline='') as fp:
            csv_writer = csv.DictWriter(fp, fieldnames=['title', 'create_time', 'link'])
            csv_writer.writeheader()
            for article in articles:
                print('正在写入%s' % article['title'])
                csv_writer.writerow(article)

        # Re-read the JSON and download each article as local HTML with images.
        with open('data/%s/%s.json' % (gzh_name, gzh_name), 'r', encoding='utf-8') as fp:
            content = json.load(fp=fp)
            for item in content:
                download_html(item['link'], gzh_name, item['title'])


def ad():
    """Print the usage banner: update date, author channel, and paging notes."""
    banner = (
        "更新日期：2021-3-12",
        "微信公众号：哥们并肩走过",
        "公众号请求次数过多，会触发微信的流量限制, 所以每次只获取100条的数据，分多次获取数据",
        "初级单线程版本...",
        "输入页数分别为0, 20, 40...",
    )
    for line in banner:
        print(line)


if __name__ == '__main__':
    # Script entry point: show the usage banner, then run the interactive scraper.
    ad()
    main()

