from lxml import etree
import os, traceback
import lxml.html
import re
from selenium import webdriver
import requests
import time
from urllib import parse
from urllib import request
import json
from html.parser import HTMLParser
from selenium.webdriver.common.by import By
from random import choice


def get_different_rank_url(start_url, rahk_time, rank_style, page_number) -> str:
    """Build the ranking-page URL for the given date, mode, and page number."""
    return f'{start_url}/ranking?date={rahk_time}&mode={rank_style}&p={page_number}'


def get_different_user_url(start_url, id) -> str:
    """Return the profile URL of user *id* rooted at *start_url*."""
    return start_url + '/user/' + id


#  Download the images and record each final download URL in list_img_url.
#  img_url_origin holds the extension-less original download links.
def download_img(img_url_origin, list_img_url,
                 save_dir='E:/program/flask_vilipixiv/vilipixiv/static/img'):
    """
    Download every image, trying the .png variant first and falling back
    to .jpg when the .png URL cannot be retrieved.

    :param img_url_origin: list of [name, url] pairs, where url lacks the
        file extension and already ends with a trailing '.'
    :param list_img_url: output list; one {name: final_url} dict is
        appended per image
    :param save_dir: directory the image files are written to (defaults
        to the original hard-coded project path)
    :return: None
    """
    # Characters not allowed (or unwanted) in the target file names;
    # compiled once instead of re-parsing the pattern per image.
    illegal_chars = re.compile(r'[<>:"/\\,| ･✫ﾟ˙?*\x00-\x1F]')
    for img in img_url_origin:
        safe_name = illegal_chars.sub('', str(img[0]))
        try:
            img_url = str(img[1]) + 'png'
            print(img_url)
            request.urlretrieve(img_url, f'{save_dir}/{safe_name}.png')
            print(img[0] + '.png下载成功')
        except OSError:
            # .png not available on the server — retry the .jpg variant.
            # (urllib network errors are OSError subclasses.)
            img_url = str(img[1]) + 'jpg'
            request.urlretrieve(img_url, f'{save_dir}/{safe_name}.jpg')
            print(img[0] + '.jpg下载成功')
        # Record the URL that actually succeeded.
        list_img_url.append({img[0]: img_url})
    return None


def get_img(html_data, list_like, list_id, list_img_url):
    """
    Extract artwork info from a parsed ranking page and download the images.

    :param html_data: lxml element tree of a ranking page
    :param list_like: output list; appended with {name: like-count} dicts
    :param list_id: output list; appended with {name: artwork-id} dicts
    :param list_img_url: output list; filled by download_img with the
        final download URL of every image
    :return: None
    """
    # Common XPath prefix of the 30 ranking entries on a page.
    base = '//*[@id="__layout"]/div/div[1]/div[2]/section/div/div[1]/ul/li'
    picture_url_list = []
    for item in range(1, 31):
        picture = html_data.xpath(f'{base}[{item}]/div[1]/a/div[1]/img')
        name = html_data.xpath(f'{base}[{item}]/div[2]/a')
        picture_number = html_data.xpath(f'{base}[{item}]/div[1]/a/div[3]/span')
        picture_like_num = html_data.xpath(f'{base}[{item}]/div[3]/div/text()')
        if picture:
            # Thumbnail <img> src; rewritten below into the original-size URL.
            picture_image_src = picture[0].get('src')
            # Artwork title
            picture_name_src = name[0].text
            # Number of pages in the artwork
            picture_number_src = picture_number[0].text
            # Like count for the artwork
            picture_like_num_str = picture_like_num[0].strip()
            list_like.append({picture_name_src: picture_like_num_str})
            # Prefix of the full-resolution image URL
            picture_url = 'http://img9.vilipix.com/picture/pages/original'
            # enumerate instead of a manual counter (the original counter
            # shadowed the outer loop variable).
            for idx, img_url_unit in enumerate(picture_image_src.split('?')[0].split('/')):
                if 6 <= idx < 10:
                    # Middle path segments of the URL
                    picture_url += '/' + img_url_unit
                # Final segment: file name without the image extension
                if idx == 10:
                    img_url_last2 = img_url_unit.split('.')[0].split('_')
                    # Single-page artwork: keep the original page suffix.
                    if int(picture_number_src) == 1:
                        one_picture_url = [picture_name_src,
                                           picture_url + '/' + img_url_last2[0] + '_' + img_url_last2[1] + '.']
                        picture_url_list.append(one_picture_url)
                    # Multi-page artwork: one URL per page (…_p0, …_p1, …).
                    else:
                        for j in range(int(picture_number_src)):
                            one_picture_url = [picture_name_src + str(j),
                                               picture_url + '/' + img_url_last2[0] + '_' + 'p' + str(j) + '.']
                            picture_url_list.append(one_picture_url)
                    list_id.append({picture_name_src: img_url_last2[0]})
        else:
            print("未找到匹配的元素节点")
        time.sleep(0.2)
    download_img(picture_url_list, list_img_url)
    return None


def get_img_start(driver, l_like, list_id, list_img_url):
    """
    Prepare a loaded ranking page for scraping: scroll so lazy-loaded
    images appear, then parse the page source and hand it to get_img.

    :param driver: selenium webdriver currently on a ranking page
    :param l_like: output list of {name: like-count} dicts
    :param list_id: output list of {name: artwork-id} dicts
    :param list_img_url: output list of {name: download-url} dicts
    :return: None
    """
    pauses = [0.5, 0.6, 0.7]
    for _ in range(2):
        # Scroll one viewport at a time so the thumbnails lazy-load.
        driver.execute_script("window.scrollBy(0, window.innerHeight);")
        time.sleep(choice(pauses))
    page_source = driver.page_source
    time.sleep(1)
    get_img(etree.HTML(page_source), l_like, list_id, list_img_url)
    return None


def get_html_source(star_url, my_headers, rank_time, rank_style, list_like, list_id, list_img_url):
    """
    Drive a Chrome browser through every page of a ranking and scrape it.

    :param star_url: site root URL
    :param my_headers: headers dict; its 'User-Agent' entry is applied to Chrome
    :param rank_time: ranking date, formatted like 20230913
    :param rank_style: ranking kind: daily, weekly or monthly
    :param list_like: output list of {name: like-count} dicts
    :param list_id: output list of {name: artwork-id} dicts
    :param list_img_url: output list of {name: download-url} dicts
    :return: page source of the first ranking page
    """
    chrome_options = webdriver.ChromeOptions()
    # Hide the usual automation fingerprints so the site serves normal pages.
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)
    chrome_options.add_argument("disable-blink-features=AutomationControlled")
    chrome_options.add_argument(f"--user-agent={my_headers.get('User-Agent')}")
    driver = webdriver.Chrome(options=chrome_options)
    # try/finally so the browser is always released, even on scrape errors.
    try:
        # Mask navigator.webdriver before any page script can read it.
        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
            Object.defineProperty(navigator, 'webdriver', {
              get: () => undefined
            })
          """
        })
        # First ranking page; used below to discover the total page count.
        url_start_rank = get_different_rank_url(star_url, rank_time, rank_style, 1)
        driver.get(url_start_rank)
        scroll_script = "window.scrollBy(0, window.innerHeight);"
        for i in range(3):
            # Scroll so the lazily loaded thumbnails appear.
            driver.execute_script(scroll_script)
            time.sleep(0.2)
        html = driver.page_source
        html_data = etree.HTML(html)
        # Pager <li> elements; the last one carries the total page count.
        pager = '//*[@id="__layout"]/div/div[1]/div[2]/section/div/div[2]/ul/li'
        page_number_pre = len(html_data.xpath(pager))
        print(page_number_pre)
        page_number = html_data.xpath(f'{pager}[{page_number_pre}]')[0].text

        for i in range(1, int(page_number)):
            # The pager is a sliding window of buttons: which <li> to click
            # depends on how far into the ranking we currently are.
            if i <= 5:
                driver.find_element(By.XPATH, f'{pager}[{i}]').click()
                get_img_start(driver, list_like, list_id, list_img_url)
            elif i <= int(page_number) - 2:
                driver.find_element(By.XPATH, f'{pager}[6]').click()
                get_img_start(driver, list_like, list_id, list_img_url)
            else:
                driver.find_element(By.XPATH, f'{pager}[7]').click()
                get_img_start(driver, list_like, list_id, list_img_url)
                driver.find_element(By.XPATH, f'{pager}[8]').click()
                get_img_start(driver, list_like, list_id, list_img_url)
    finally:
        # quit() (not close()) so the chromedriver process is terminated,
        # not just the window.
        driver.quit()
    return html


if __name__ == "__main__":
    base_url = 'https://www.vilipix.com'
    request_headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69',
        'Cookie':
            'auth.strategy=local; Hm_lvt_7246eb56b79171a6fe5284a8bf523aa0=1694076593,1694081519,1694141021,1694154244; Hm_lpvt_7246eb56b79171a6fe5284a8bf523aa0=1694154244'
    }
    # [{name: like-count}, ...]
    img_list_like = []
    # [{name: artwork-id}, ...]
    img_id_list = []
    # [{name: download-url}, ...]
    img_url = []
    get_html_source(base_url, request_headers, '20230910', 'daily',
                    img_list_like, img_id_list, img_url)
    print(img_list_like)
    print(img_id_list)
    print(img_url)
