import re
import requests
import time
import os
import sys
import numpy as np

class Logger(object):
    """Tee for stdout: mirrors every write to both the console and a log file."""

    def __init__(self, fileN="Default.log"):
        self.terminal = sys.stdout   # keep a handle on the real console stream
        self.log = open(fileN, "a+")  # append mode so earlier runs are preserved

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
        # Flush after every write so the log survives an abnormal exit.
        self.flush()

    def flush(self):
        # Bug fix: flush BOTH streams. print(..., flush=True) and interpreter
        # shutdown call flush() on sys.stdout; the original only flushed the
        # log file, leaving console output buffered.
        self.terminal.flush()
        self.log.flush()


# Purpose of `index`: if a download run is interrupted, the next run can start
# directly from this page index, so already-downloaded pages are not fetched again.
def download_page(tag_url, page_info, index):
    """Download every image of every page in ``page_info[index:]``.

    tag_url   -- the tag/category URL; its last path segment names the output dir
    page_info -- sequence of (page_url, page_title) tuples, as produced by get_page()
    index     -- resume offset: pages before it are assumed already downloaded
    """
    header = {
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
    }
    dir_tag = tag_url.split('/')[-1]
    os.makedirs(dir_tag, exist_ok=True)

    page_number = index
    for page_url, page_title in page_info[index:]:
        # Normalize relative page addresses to absolute URLs.
        # (Bug fix: the original used `'http' not in page_url`, which also
        # matched 'http' appearing anywhere inside a relative path.)
        if not page_url.startswith('http'):
            page_url = 'https://www.vmgirls.com/' + page_url

        # Fetch the page. Bug fix: get_page_text() returns None on failure;
        # the original unconditionally accessed `.text` and crashed.
        response = get_page_text(page_url, header)
        if response is None:
            break
        html = response.text
        if len(html) == 0:
            break

        # The site serves several different markup variants; try each image
        # extraction pattern in order until one matches.
        patterns = (
            r'<a href="(.*?)" alt=".*?" title=".*?">',
            r'<img alt=".*?" width=".*?" height=".*?" class=".*?" data-src="(.*?)"',
            r'<img alt=".*?" src=".*?" style="" title=".*?" data-src="(.*?)"',
            r'<img alt=".*?" src=".*?" style="float:none" data-src=".*?" data-nclazyload="true" data-pagespeed-url-hash=".*?" onload=".*?" data-pagespeed-lsc-url="(.*?)">',
            r'<img alt=".*?" src=".*?" style="float:none;" data-src="(.*?)" .*?>',
            r'<img alt=".*?" src=".*?" title=".*?" data-src="(.*?)" .*?>',
        )
        urls = []
        for pattern in patterns:
            urls = re.findall(pattern, html)
            if urls:
                break

        # One sub-directory per page, prefixed with its resume index.
        dir_page = str(page_number) + '_' + page_title
        os.makedirs(dir_tag + '/' + dir_page, exist_ok=True)

        time_start = time.time()
        time_now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_start))

        img_number = 0
        print()
        print(time_now + ' 开始下载！')
        for url in urls:
            img_number += 1
            print('正在下载第' + str(img_number) + '张图片..................')
            # Strip any query string before taking the file extension,
            # otherwise '?x=y' would end up in the file name.
            img_type = '.' + url.split('?')[0].split('.')[-1]
            img_name = str(img_number) + img_type

            if url.startswith('http'):
                img = requests.get(url, headers=header)
            else:
                img = requests.get('https://vmgirls.com/' + url, headers=header)

            with open(dir_tag + '/' + dir_page + '/' + img_name, 'wb') as file:
                file.write(img.content)

        time_end = time.time()
        time_now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_end))
        total_time = '%.2f' % (time_end - time_start)
        print(time_now + ' 下载结束！')
        print('总耗时： ' + str(total_time) + ' 秒')
        print(dir_page + '：' + str(img_number) + 'P' + ' 编号：' + str(page_number))
        print('******************************************************************')
        page_number += 1


#返回一个元组，包含页面链接和标题，这网站采用ajax动态加载，要加载更多需要发post请求
def get_page(url,paged, query, page):
    #主页请求地址
    parent_url = url
    #ajax请求地址
    ajax_url = 'https://www.vmgirls.com/wp-admin/admin-ajax.php'
    #请求头
    header = {
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
    }
    #ajax的参数

    '''全部参数，实际上只有paged、query、page三个参数会改变
    param = {
        'append':'list-archive',
        'paged':'3',
        'action':'ajax_load_posts',
        'query':'33',
        'page':'cat',
    }
    '''

    #第一次请求的网页
    html = requests.get(parent_url, headers = header).text

    #ajax动态请求的网页，需要循环请求直到加载完所有页面
    for i in range(2,paged):
        ajax_html = requests.post(ajax_url, headers = header, data = {'append':'list-archive','paged':str(i),'action':'ajax_load_posts','query':str(query),'page':str(page)}).text
        html += ajax_html
    
    stringx = '<a class="media-content" href="(.*?)" title="(.*?)" target="_blank"'
    meta_div = re.findall(stringx, html)
    '''
    meta_div = np.array(meta_div)
    page_address = meta_div[:,0]
    page_title = meta_div[:,1]
    '''
    return meta_div


def get_page_text(page_url, header):
    """GET *page_url* and return the Response on HTTP 200, else None.

    On a timeout, sets the module-level NETWORK_STATUS flag to False and
    retries the same GET up to 9 more times before giving up.
    """
    try:
        response = requests.get(page_url, headers=header, timeout=5)
        if response.status_code == 200:
            return response
    except requests.exceptions.Timeout:
        global NETWORK_STATUS
        NETWORK_STATUS = False  # remember that the network has timed out

    if NETWORK_STATUS == False:
        # Retry loop. Bug fixes vs. the original:
        #  - retried with requests.post, which is the wrong verb for
        #    fetching a page; use GET like the first attempt;
        #  - a timeout inside the retry loop escaped uncaught and killed
        #    the caller; now it just moves on to the next attempt.
        for _ in range(9):
            try:
                response = requests.get(page_url, headers=header, timeout=5)
                if response.status_code == 200:
                    return response
            except requests.exceptions.Timeout:
                continue
    return None
        


def print_to_log():
    """Redirect sys.stdout to log/<YYYY-MM-DD>.log.

    Bug fix: the original assigned sys.stdout inside a ``with`` block, so the
    file was already CLOSED when the function returned and every subsequent
    print() raised ValueError. The file is now deliberately left open; it
    lives for the rest of the process (the caller owns it).
    """
    dir_name = 'log'
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)

    # One log file per calendar day.
    time_now = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    log_filename = time_now + '.log'
    sys.stdout = open(dir_name + '/' + log_filename, 'w', encoding='utf-8')

if __name__ == '__main__':
    
    NETWORK_STATUS = True # 判断状态变量
    sys.stdout = Logger("log.txt")
    tag_url = 'https://www.vmgirls.com/campus'
    page_info = get_page(tag_url, 6, 33, 'cat')
    download_page(tag_url, page_info, 61)
    

    '''
    #用于打印网页内容修正正则表达式
    header = {
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
    }
    html = get_page_text('https://www.vmgirls.com/1539.html', header).text
    print(html)
    
    urls = re.findall('<img alt=".*?" src=".*?" title=".*?" data-src="(.*?)" .*?>', html)
    print(urls)
    '''

