import datetime
import glob
import hashlib
import json
import os
import re
import shutil
import sys
import time


# Add local project checkouts to sys.path so the packages below resolve.
sys.path.append(r'J:\_ALL\CODE\gitee\yeahmao\wechat_mp')
sys.path.insert(0,  r'J:\_ALL\CODE\gitee\constellations\Capricorn')
from wechat_mp import WeChat
from _utils import *
from utils import *


# Root directory for all crawler output (Windows-specific absolute path).
dir_root = r'J:\_ALL\_data\爬虫\公众号历史文章'

def 获取最后一次更新时间(root_dir, mp_name):
    """Return the unix timestamp of the newest cached dump for *mp_name*.

    Cached dumps are named ``<mp_name>--YYYY-MM-DD.json`` directly under
    *root_dir*.  Returns 0 when no dump exists yet.

    :param root_dir: directory that holds the per-account JSON dumps
    :param mp_name: official-account name (filename prefix)
    :return: float timestamp of local midnight of the newest dump date, or 0
    """
    # glob.escape guards against glob metacharacters ([], *, ?) that might
    # appear in an account name; only the trailing '*.json' stays a wildcard.
    pattern = glob.escape(os.path.join(root_dir, mp_name)) + '*.json'
    lst_files = sorted(glob.glob(pattern))
    if not lst_files:
        print('获取最后一次更新时间: ', 0)
        return 0

    # Zero-padded dates make the lexicographic sort chronological, so the
    # last entry is the newest dump.  Strip '<mp_name>--' and '.json' to get
    # the bare 'YYYY-MM-DD' part.
    date_part = lst_files[-1].split(mp_name + '--')[1].split('.json')[0]
    y, m, d = date_part.split('-')

    last_time = datetime.datetime(int(y), int(m), int(d)).timestamp()
    print('获取最后一次更新时间: ', y, m, d, last_time)
    return last_time


class Main(object):
    """Crawler for the article history of one WeChat official account.

    Pipeline, driven by :meth:`run`:
      1. (currently disabled) fetch the article list via the ``wechat_mp``
         client and dump it to ``<mp_name>--<date>.json``;
      2. download each article page as HTML into ``dir_root/<mp_name>/``;
      3. download the images referenced by each HTML page into a per-article
         sub-directory.
    """

    def __init__(self):
        # md5(image-url) -> url for images flagged during this run (currently
        # GIFs); complements the external persistent blacklist consulted via
        # 图片UrlMd5在黑名单中().
        self.黑名单Md5 = {}
        # NOTE(review): client login is deliberately disabled — the original
        # code returned before constructing it, and run() has the JSON step
        # commented out.  The hard-coded email/password that used to sit here
        # was removed (credentials must never live in source).  When
        # re-enabling, read them from the environment, e.g.:
        #   self.client = WeChat(email=os.environ['WECHAT_MP_EMAIL'],
        #                        password=os.environ['WECHAT_MP_PASSWORD'],
        #                        enable_cookies=True)
        self.client = None

    def 下载公众号历史文章json(self, mp_name, save_json_fn, last_time):
        """Search the account *mp_name* and dump every article newer than
        *last_time* (unix timestamp) into *save_json_fn* as JSON.

        Requires ``self.client`` to be initialised (disabled in __init__).
        """
        accounts = self.client.search_account(mp_name, limit=1)
        print(accounts)
        if len(accounts):
            articles = accounts[0].my_articles(interval=8, limit=10000, last_time=last_time)
            with open(save_json_fn, 'w', encoding='utf-8') as wf:
                json.dump(articles, wf)

    def 下载公众号历史文章json2html(self, root_dir, mp_name, save_json_fn):
        """Download the HTML page of every article listed in *save_json_fn*
        into ``root_dir/mp_name/``; files already on disk are skipped."""
        _root_dir = os.path.join(root_dir, mp_name)
        my_mkdirs(_root_dir)
        with open(save_json_fn, 'r', encoding='utf-8') as rf:
            j = json.load(rf)
            print(len(j))
            # The dump is a list of result pages, each a list of articles.
            for item_page in j:
                for item in item_page:
                    # Local filename: <sanitized-title>--<utc-time>--<aid>.html
                    fn = '{}--{}--{}.html'.format(
                        my_str_convert_2_path(item['title']),
                        # Timezone-aware replacement for the deprecated
                        # datetime.utcfromtimestamp(); same formatted output.
                        datetime.datetime.fromtimestamp(
                            item['update_time'], tz=datetime.timezone.utc
                        ).strftime('%Y-%m-%d_%H-%M-%S'),
                        item['aid']
                    )
                    print('*' * 8, fn)
                    html全路径 = os.path.join(_root_dir, fn)
                    if not os.path.isfile(html全路径):
                        my_download_cache_ez(item['link'], html全路径)
                        time.sleep(1)  # be polite to the server

    def 下载公众号历史文章html2pic_下载图片(self, _dir, pic):
        """Download a single article image *pic* (a mmbiz URL ending in
        ``?wx_fmt=<ext>``) into directory *_dir*.

        Two local naming schemes are supported: the legacy scheme (full URL
        mangled into a filename) is only checked for pre-existing files; new
        downloads use the short ``<md5(url)>.<ext>`` scheme to stay within
        Windows path-length limits.
        """
        # Legacy name: strip the scheme, turn '?wx_fmt=xxx' into an
        # extension and '/' into '`'.
        local_pic = pic.replace('https://', '').replace('http://', '') \
            .replace('?wx_fmt=', '.').replace('/', '`')
        local_pic = os.path.join(_dir, local_pic)
        # 兼容文件名形式的 — skip images already saved under the legacy name.
        if os.path.exists(local_pic):
            return

        # Current scheme: md5 of the URL.  GBK encoding kept for
        # compatibility with files hashed by earlier runs.
        picMd5 = hashlib.md5(pic.encode(encoding='GBK')).hexdigest()
        if 图片UrlMd5在黑名单中(picMd5):
            return
        后缀 = pic.split('wx_fmt=')[-1]
        # 'other' means WeChat could not classify the format; nothing usable
        # to save.  (This check must precede the whitelist test below — in
        # the original code it sat after an `assert` that made it
        # unreachable.)
        if 后缀 == 'other':
            return
        if 后缀 not in {'gif', 'jpeg', 'jpg', 'png'}:
            # Fail loudly on unseen formats instead of `assert False`
            # (asserts are stripped under `python -O`).
            raise ValueError('unexpected wx_fmt suffix {!r} in {}'.format(后缀, pic))
        if 后缀 == 'gif':
            # Record GIFs in the in-memory blacklist for this run.
            print(picMd5, pic)
            self.黑名单Md5[picMd5] = pic
        local_pic = os.path.join(_dir, f'{picMd5}.{后缀}')
        if len(local_pic) >= 256:
            # Windows MAX_PATH guard; raise instead of assert (see above).
            raise OSError('local path too long: {}'.format(local_pic))

        my_download_cache_ez(pic, local_pic)

    def 下载公众号历史文章html2pic_下载封面(self, _dir, html):
        """Placeholder: download the article cover image into *_dir*.

        TODO: some accounts have notable covers (e.g. cosplay部落菌姬酱);
        extract them via a pattern such as
        ``js_row_immersive_cover_img.*"(.*\\?wx_fmt=.*?)"`` or
        ``data-src="(http[s]://mmbiz.*?\\?wx_fmt=.*?)"``.
        """
        pass

    def 下载公众号历史文章html2pic(self, root_dir, mp_name):
        """For every downloaded HTML file under ``root_dir/mp_name/``,
        extract the ``data-src`` mmbiz image URLs and download each image
        into a sub-directory named after the HTML file (without ``.html``).

        NOTE(review): overly long article titles can push these
        sub-directory paths past Windows limits (historical
        FileNotFoundError with the legacy naming scheme); the md5-based
        image names in …下载图片 mitigate this.

        :param root_dir: crawler output root
        :param mp_name: official-account name (sub-directory)
        """
        _root_dir = os.path.join(root_dir, mp_name)
        my_mkdirs(_root_dir)
        lst = os.listdir(_root_dir)
        for fn in lst:
            full_fn = os.path.join(_root_dir, fn)
            if not os.path.isfile(full_fn):
                continue

            print('\n\n>>>  [下载公众号历史文章html2pic] ', full_fn)
            with open(full_fn, 'r', encoding='utf-8') as rf:
                html = rf.read()
                pics = re.findall(r'data-src="(http[s]://mmbiz.*?\?wx_fmt=.*?)"', html)
                print(pics)
                _dir = full_fn[:-1 * len('.html')]
                my_mkdirs(_dir)
                # Cover download is per-HTML-file, not per-image — the
                # original called it inside the loop below, once per image.
                self.下载公众号历史文章html2pic_下载封面(_dir, html)
                for pic in pics:
                    self.下载公众号历史文章html2pic_下载图片(_dir, pic)

    def run(self, mp_name):
        """Run the pipeline for one account: locate/derive the JSON dump
        name, then convert it to HTML pages and download their images.

        The JSON-fetch step is currently disabled (needs self.client).
        """
        save_json_fn = '{}--{}.json'.format(
            mp_name,
            '2024-04-09'  # pinned dump date; was time.strftime('%Y-%m-%d', ...)
        )
        save_json_fn = os.path.join(dir_root, save_json_fn)
        last_time = 获取最后一次更新时间(dir_root, mp_name)
        # self.下载公众号历史文章json(mp_name, save_json_fn, last_time)
        self.下载公众号历史文章json2html(dir_root, mp_name, save_json_fn)
        self.下载公众号历史文章html2pic(dir_root, mp_name)


def main():
    """Entry point: crawl one hard-coded official account."""
    # Previously crawled accounts: '夜猫逐梦', 'acg美女cosplay营地'
    target_account = 'cosplay二次元大全'
    crawler = Main()
    crawler.run(target_account)


if __name__ == "__main__":
    print("------------------    Enter __main__    ------------------")

    print(u"[Current work directory is : ]\t" + os.getcwd())
    print(u"[Current process ID is : ]\t" + str(os.getpid()))
    print("\n")
    main()

    print("------------------    Leave __main__    ------------------")
