# -*- coding: utf-8 -*-
from selenium import webdriver
import os,sys,time
import uuid
from PIL import Image
import math
from io import BytesIO
import json
import requests
from lxml import etree


def fullpage_screenshot(driver, aid):
    """Capture a full-page screenshot by scrolling one viewport at a time
    and stitching the shots vertically, then save the image and the page
    HTML under the article id.

    :param driver: selenium WebDriver with the target page already loaded.
    :param aid: article id used as the basename of both output files.
    """
    page_height = driver.execute_script("return document.body.parentNode.scrollHeight")
    viewport_height = driver.execute_script("return window.innerHeight")
    print("Total: ({0}), Viewport: ({1})".format(page_height, viewport_height))

    # Top-left scroll coordinate of each screen to capture.
    num = math.ceil(page_height / viewport_height)
    rectangles = [(0, viewport_height * i) for i in range(num)]

    screen_shot_list = []
    total_height = 0
    photo_width = 0
    # Scroll offset of the last captured screen.  Initialized here so the
    # ptype check below cannot raise NameError when rectangles is empty.
    pixels = 0

    for i, rectangle in enumerate(rectangles):
        # Scroll down to the capture area.
        driver.execute_script("window.scrollTo({0}, {1})".format(rectangle[0], rectangle[1]))
        pixels = rectangle[1]
        print("下拉滚动条到截屏区域左上角坐标(x={},y={}), 截取第{}屏".format(rectangle[0], rectangle[1], i))
        time.sleep(3)

        # Grab the current viewport as a PIL image.
        screenshot = Image.open(BytesIO(driver.get_screenshot_as_png()))
        photo_width = screenshot.size[0]

        # Last screen: crop away the top part that overlaps the previous shot.
        if rectangle[1] + viewport_height > page_height:
            ratio = (page_height - rectangle[1]) / viewport_height
            box = (0, (screenshot.size[1] * (1 - ratio)), photo_width, screenshot.size[1])
            print(box)
            screenshot = screenshot.crop(box)
            print((rectangle[1] + viewport_height - page_height), screenshot.size[1])

        screen_shot_list.append((screenshot, 0, total_height))
        total_height += screenshot.size[1]

    # Stitch all screens into one tall image.
    Image.MAX_IMAGE_PIXELS = None  # disable the decompression-bomb guard for very tall pages
    stitched_image = Image.new('RGB', (photo_width, total_height))
    for screen in screen_shot_list:
        stitched_image.paste(screen[0], (screen[1], screen[2]))

    # JPEG dimensions are capped at 65535 px, so very tall pages go to PNG.
    ptype = "png" if pixels > 65000 else "jpg"
    # Trim 10 px off the left edge and 20 px off the right edge.
    stitched_image = stitched_image.crop((10, 0, stitched_image.size[0] - 20, stitched_image.size[1]))
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
    # filter and has been available since Pillow 2.7.
    stitched_image_05 = stitched_image.resize(
        (int(stitched_image.size[0] * 0.5), int(stitched_image.size[1] * 0.5)), Image.LANCZOS)
    stitched_image_05.save("/Users/wdl/wdl/huimei/Roche/图文/图片/{}.{}".format(aid, ptype))

    # Persist the page HTML alongside the screenshot; explicit utf-8 so the
    # Chinese content writes correctly regardless of the platform locale.
    html = driver.page_source
    with open("/Users/wdl/wdl/huimei/Roche/图文/文章/{}.html".format(aid), "w", encoding="utf-8") as f:
        f.write(html)


def getProxyIP():
    """Fetch a fresh HTTPS proxy address from the proxy-pool service.

    Returns the raw response body decoded as UTF-8 (expected to be an
    "ip:port" string).  Raises requests.RequestException on network
    failure or timeout.
    """
    # An explicit timeout keeps a dead proxy service from hanging the
    # whole crawl forever (requests has no default timeout).
    resp = requests.get("http://47.105.151.143:83/getip?p=https", timeout=10)
    return resp.content.decode("utf-8")


def pages(dirname):
    """Download screenshots + HTML for every article listed in the JSON
    files under *dirname*.

    Each file is expected to contain a JSON object with an
    ``app_msg_list`` array of items carrying ``link`` and ``aid`` keys.
    Articles already downloaded successfully are skipped; articles whose
    saved HTML looks like a network-error page are retried.
    """
    for filename in os.listdir(dirname):
        # Skip macOS .DS_Store entries.
        if "Store" in filename:
            continue
        filepath = "{}/{}".format(dirname, filename)
        print(filepath)
        with open(filepath, encoding="utf-8") as f:
            content = f.read()

        items = json.loads(content)["app_msg_list"]
        for cc, item in enumerate(items):
            uri = item["link"]
            aid = item["aid"]
            article_name = "/Users/wdl/wdl/huimei/Roche/图文/文章/{}.html".format(aid)

            # Re-download only articles that failed because of network problems.
            if os.path.exists(article_name):
                # Anything over 240 KB is assumed to be a complete article.
                if os.path.getsize(article_name) > 1024 * 240:
                    continue
                with open(article_name, encoding="utf-8") as f:
                    saved = f.read()
                # Chrome's Chinese "This site can't be reached" error page;
                # anything else counts as a successful download.
                if "无法访问此网站" not in saved:
                    continue

            driver = webdriver.Chrome()
            # try/finally guarantees the browser is closed even when
            # driver.get or the screenshot raises, avoiding orphaned
            # chromedriver processes.
            try:
                driver.set_window_size(width=485, height=1050)
                print(len(items), cc, aid, uri)
                driver.get(uri)
                time.sleep(2)
                fullpage_screenshot(driver, aid)
            finally:
                driver.quit()


def account_history():
    """Parse the saved WeChat account-history HTML pages under ``accounts/``
    and collect article metadata into a single JSON index file.

    The index maps "<mid>_<idx>" article ids to dicts holding the aid,
    source filename, publish timestamp, item index, link, and title.
    """
    root = "accounts"
    app_msg_list = {}
    for dirname in os.listdir(root):
        # Skip macOS .DS_Store entries.
        if "Store" in dirname:
            continue

        dirname = "{}/{}".format(root, dirname)
        for filename in os.listdir(dirname):
            if "Store" in filename:
                continue

            filepath = "{}/{}".format(dirname, filename)
            # Skip truncated or empty downloads.
            if os.path.getsize(filepath) < 1000:
                continue

            # Explicit utf-8: these pages contain Chinese text and would
            # fail to decode under a non-UTF-8 platform default encoding.
            with open(filepath, encoding="utf-8") as f:
                content = f.read()
            print(filepath)
            html = etree.HTML(content)
            media = html.xpath('//div[@class="weui_media_bd js_media"]')
            for m in media:
                url = m.xpath('./h4[@class="weui_media_title"]/@hrefs')
                if len(url) == 0:
                    continue
                url = url[0]
                mid = ""
                itemidx = ""
                # Pull mid and idx out of the article link's query string.
                for param in url.split("&"):
                    if "mid=" in param:
                        mid = param.replace("mid=", "")
                    if "idx=" in param:
                        itemidx = param.replace("idx=", "")

                aid = "{}_{}".format(mid, itemidx)
                title = m.xpath('./h4[@class="weui_media_title"]/text()')[0].replace("\"", "").strip()
                create_time = m.xpath('./p[@class="weui_media_extra_info"]/text()')[0]
                # Publish dates are rendered like "2020年01月08日".
                create_time = int(time.mktime(time.strptime(create_time, "%Y年%m月%d日")))
                app_msg_list[aid] = {
                    "aid": aid,
                    "appmsgid": filename,
                    "create_time": create_time,
                    "itemidx": itemidx,
                    "link": url,
                    "title": title
                }
                print(create_time, aid, title)

    print(len(app_msg_list))
    # json.dumps defaults to ensure_ascii=True, so the payload is pure
    # ASCII; utf-8 is still stated explicitly for safety.
    with open("/Users/wdl/wdl/huimei/Roche/肝癌/历史文章/all.json", "w", encoding="utf-8") as f:
        f.write(json.dumps(app_msg_list))


def deleted(dirname="/Users/wdl/wdl/huimei/Roche/图文/文章",
            image_dirname="/Users/wdl/wdl/huimei/Roche/图文/图片"):
    """Delete article HTML files smaller than 50 KB (failed downloads)
    together with their companion screenshots.

    :param dirname: directory holding the ``<aid>.html`` article files.
    :param image_dirname: directory holding the screenshots.

    Bug fix: fullpage_screenshot saves very tall pages as ``.png`` rather
    than ``.jpg``; the old code unconditionally removed the ``.jpg`` and
    raised FileNotFoundError for PNG articles, leaving the HTML behind.
    Now both extensions are tried and a missing image is tolerated.
    """
    for filename in os.listdir(dirname):
        filepath = "{}/{}".format(dirname, filename)
        # Anything under 50 KB is assumed to be a failed download.
        if os.path.getsize(filepath) < 1024 * 50:
            print(filepath)
            base = filename.replace(".html", "")
            for ext in ("jpg", "png"):
                image_path = "{}/{}.{}".format(image_dirname, base, ext)
                if os.path.exists(image_path):
                    os.remove(image_path)
            os.remove(filepath)


if __name__ == '__main__':
    # Crawl every article-list directory for this date range, skipping
    # macOS .DS_Store entries.
    rootname = "/Users/wdl/wdl/huimei/Roche/公众号文章/肺癌/文章列表/20200108-20200201"
    for dirname in os.listdir(rootname):
        if "Store" not in dirname:
            pages("{}/{}".format(rootname, dirname))

    # deleted()
    # account_history()

