import requests
from re import findall
from bs4 import BeautifulSoup
import time
import os

# NOTE(review): `link_shared_object` is never used below — this looks like an
# accidental IDE auto-import; confirm and remove.
from setuptools.command.build_ext import link_shared_object

# Sibling module that scrapes the official-account list and per-account
# article lists (get_account_list / get_content_list).
from 公众号文章列表 import get_account_list, get_content_list

# Title and publish date of the most recently fetched article.  Set as a side
# effect of get_weixin_html() and read by down_pic() when naming output files.
weixin_title = ""
weixin_time = ""


# Fetch a WeChat article page, record its title and publish date, and return
# the article body HTML with lazy-loaded images rewritten so they render.
def get_weixin_html(url):
    """Download the article at *url* and return its cleaned body HTML.

    Side effects: sets the module globals ``weixin_title`` and
    ``weixin_time`` (both fall back to '未知' when not found on the page).
    """
    global weixin_time, weixin_title
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "html.parser")

    # The title lives in the first <h1>.  BUG FIX: ``temp.string`` is None
    # when the <h1> contains nested tags, which crashed ``.strip()``.
    temp = soup.find('h1')
    if temp and temp.string:
        weixin_title = temp.string.strip()
    else:
        weixin_title = '未知'

    # Publish date appears as YYYY-M-D somewhere in the raw HTML.
    # BUG FIX: ``result[0]`` raised IndexError when no date matched.
    result = findall(r"(\d{4}-\d{1,2}-\d{1,2})", res.text)
    weixin_time = result[0] if result else '未知'

    # Extract the article body and force it visible (WeChat ships it with
    # visibility hidden until its own JS runs).
    content = soup.find(id='js_content')
    soup2 = BeautifulSoup(str(content), "html.parser")
    # BUG FIX: pages without a js_content div made ``soup2.div`` None.
    if soup2.div is not None:
        soup2.div['style'] = 'visibility: visible;'
    html = str(soup2)
    pattern = r'http[s]?:\/\/[a-z.A-Z_0-9\/\?=-_-]+'
    links = findall(pattern, html)

    # WeChat lazy-loads images via data-src; rewrite to plain src so the saved
    # page renders.  BUG FIX: the loop variable used to shadow the *url*
    # parameter; renamed to ``link``.
    for link in links:
        html = html.replace('data-src="' + link + '"', 'src="' + link + '"')

    return html


# Download every image referenced in *content* into *pic_path* and rewrite the
# HTML so the <img> tags point at the local copies.
# (The old comment said "upload to server"; the code downloads.)
def download_pic(content, pic_path):
    """Download all image URLs found in *content* into *pic_path*.

    Returns *content* with each successfully downloaded remote URL replaced
    by its local file path.  Each download is retried up to 10 times; a
    failure after all retries is reported and the URL is left untouched.
    """
    if not os.path.exists(pic_path):
        os.makedirs(pic_path)

    # Collect every http(s) URL in the HTML.
    pattern = r'http[s]?:\/\/[a-z.A-Z_0-9\/\?=-_-]+'
    pic_list = findall(pattern, content)

    for index, item in enumerate(pic_list, 1):
        count = 1
        flag = True  # True while this image still needs downloading
        pic_url = str(item)

        while flag and count <= 10:
            try:
                data = requests.get(pic_url)

                # Pick a file extension from the URL; default to .jpg.
                if pic_url.find('png') > 0:
                    file_name = str(index) + '.png'
                elif pic_url.find('gif') > 0:
                    file_name = str(index) + '.gif'
                else:
                    file_name = str(index) + '.jpg'

                with open(pic_path + file_name, "wb") as f:
                    f.write(data.content)

                # Point the HTML at the local copy.
                content = content.replace(pic_url, pic_path + file_name)

                flag = False
                print('已下载第' + str(index) + '张图片.')
                time.sleep(1)  # be polite to the image host

            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                count += 1
                time.sleep(1)

        # BUG FIX: the old test ``count > 10`` also fired when the download
        # succeeded on a late retry (count was bumped on success too);
        # ``flag`` is the reliable "still not downloaded" signal.
        if flag:
            print("下载出错：", pic_url)
    return content


def get_link(dir):
    """Read the text file *dir* and return its lines with newlines stripped."""
    with open(dir, 'r') as fh:
        return [raw.strip('\n') for raw in fh]


# Characters that are not allowed in (Windows) file names.
invalid_chars = '<>:"/\|?*'


def sanitize_filename(filename):
    """Return *filename* with every character from ``invalid_chars`` removed."""
    # str.translate sweeps the whole string in a single C-level pass.
    return filename.translate({ord(ch): None for ch in invalid_chars})


# Download every article listed in link.txt, one numbered folder per line.
def link_down():
    """Read article URLs from ``link.txt`` and download each one.

    The article on line *j* (1-based) is saved under ``pic/j/``.  Lines that
    are not canonical WeChat article links are reported and skipped.
    """
    linklist = get_link('link.txt')

    # BUG FIX: the old version wrapped this loop in ``while input_flag`` and
    # only cleared the flag on the first valid link, so a file containing no
    # valid links spun forever.  A single pass over the list is sufficient.
    # (Also: the match result used to be bound to ``re``, shadowing the
    # regex module's name.)
    for j, weixin_url in enumerate(linklist, 1):
        # Accept only canonical article links: mp.weixin.qq.com/s/<id>.
        matches = findall(r'http[s]?:\/\/mp.weixin.qq.com\/s\/[0-9a-zA-Z_]+',
                          weixin_url)
        if not matches:
            print("链接有误，请重新输入!")
            continue

        down_pic(weixin_url, 'pic/' + str(j) + '/')

# Fetch one article and persist it (text, html, images) under *path*.
def down_pic(weixin_url, path):
    """Download the article at *weixin_url* into directory *path*.

    Saves the processed body twice — as ``<title>.txt`` and ``<title>.html``
    — using the module globals set by get_weixin_html().
    """
    content = get_weixin_html(weixin_url)
    content = download_pic(content, path)

    base = path + sanitize_filename(weixin_title)

    print(content)
    with open(base + '.txt', 'w+', encoding="utf-8") as out:
        out.write(content)
    with open(base + '.html', 'w+', encoding="utf-8") as out:
        out.write(content)

    print("标题：《" + weixin_title + "》")
    print("发布时间：" + weixin_time)

# MzkwMjY3MTM2OA==    谦宇阁
# MzkwODc1NTQ0Ng==    浩瀚头像社
# MzkyOTcyMDAwMQ==    阿九图库集
# MzU2Mjc5NjAzNg==    图说趣文馆
# MzkzODczMjI4Mw==    亲清头像

if __name__ == "__main__":

    # Accounts to crawl: each entry is (biz id, display name).
    path = 'account_list.txt'
    account_list = get_account_list(path)
    print(account_list)

    # Collect up to 15 recent articles per account.
    linklist = []
    for account in account_list:
        content_list = get_content_list(15, account[0])
        for content in content_list:
            linklist.append({'name': account[1], 'content': content})

    print(linklist)

    # BUG FIX: the old ``while input_flag`` wrapper looped forever once every
    # article had been downloaded — each pass just printed '已经下载过' and
    # continued without ever clearing the flag.  One pass is enough.
    for link in linklist:
        name = link['name']
        directory = 'pic/' + name + '/' + link['content'].get('aid') + '/'
        # The per-article directory doubles as the "already downloaded" marker.
        if not os.path.exists(directory):
            os.makedirs(directory)
        else:
            print('已经下载过，直接跳过...')
            continue

        weixin_url = link['content'].get('link')
        # Sanity-check that this is a WeChat link at all.
        if not findall(r'http[s]?:\/\/mp.weixin.qq.com\/', weixin_url):
            print("链接有误，请重新输入!")
            continue

        print(weixin_url)
        try:
            down_pic(weixin_url, directory)
        except Exception as e:
            # BUG FIX: failures used to be silently swallowed (``pass``);
            # report which article failed and why, then keep going.
            print("下载失败：", weixin_url, e)
