'''
Author: GuoHuan
Date: 2021-11-17 15:51:03
LastEditTime: 2021-12-02 16:45:02
Description: 静态网站爬虫系统
FilePath: \webspider\static-resource-spider.py
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Crawl a website's pages and static resources

__author__ = 'mask'

import urllib.request
import re
import os
import time
from functools import reduce
from pathlib import Path
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import csv
import time  
from colorama import init,Fore
  
#=================需要设置的地方=================

#是否重复下载 
# True无论是否已经下载过，都会重复下载   
# False 当本地已存在相同文件时，不重复下载，直接跳过 
# Flase 时才会断点继续上次未完成的任务
Force = False       

# #检查完整性
# # True时，同时本地任务文件存在，则会进行检测，将本地已经存在的文件剔除出等待下载的任务中
# # 当Force设置为True时 本参数无效果
# Check = False

#是否下载资源 
# True 会下载页面中引用的资源
# False 跳过资源下载
DownloadSourse = True 


#本次需要下载的页面  

#页面可下载范围
#  当html的在下列列表中内，才会进行下载 
downloadfix = ['https://keras.io/api/','http://127.0.0.1'];

#===============================================

IMG_TYPE_ARR = ['jpg', 'png', 'ico', 'gif', 'jpeg', 'svg']

# 正则表达式预编译
# 这里涉及到了非贪婪匹配
# ((?:/[a-zA-Z0-9.]*?)*)
# ((?:/[a-zA-Z0-9.]*)*?)
REG_RESOURCE_TYPE = r'(?:href|src|data\-original|data\-src)=["\'](.+?\.(?:js|css|jpg|jpeg|png|gif|svg|ico|ttf|woff2))[a-zA-Z0-9\?\=\.]*["\']' 
 
regResouce = re.compile(REG_RESOURCE_TYPE, re.S) 
  
  
htmlSourseLen = 0   #用于纪录当前资源列表的长度，如果不相等，则保存当前进度
sourseLen = 0
 

SAVE_PATH = os.path.join(os.path.abspath('.'), 'python-spider-downloads')

downloadedList = [] #已下载的资源列表
sourseList = []     #待下载的资源列表
downloadurlList = []#需要下载的html资源列表


'''
解析URL地址
'''
def parseUrl(url):
    if not url:
        return 
    res = urlparse(url)
    if res is not None:
        tmp = res.path.rfind('/')+1 
        ext = '';
        if tmp > 1:
            fileName = res.path[tmp:]
            if fileName == '':
                fileName = None;
            elif len(fileName.split('.'))>1:
                ext = fileName.split('.')[1];
            else:
                tmp = len(res.path)
                fileName = None;
                ext = '';
        else:
            fileName = None;
        return dict(
            baseUrl=res.scheme+'://' + res.hostname,
            fullPath=res.scheme+'://' + res.hostname+res.path[:tmp],
            protocol=res.scheme+'://',
            domain=res.hostname,
            path=res.path[:tmp],
            fileName=fileName,
            ext=ext,
            params=res.fragment
        )
 


def isCssType(str):
    return str.lower().endswith('.css')


def isJsType(str):
    return str.lower().endswith('.js')


def isImgType(str):
    for ext in IMG_TYPE_ARR:
        if str.endswith('.' + ext):
            return True
def isHtmlType(str): 
    if str.lower().endswith('.html'):
        if str.startswith('http') or str.startswith('https'):
            for s in downloadfix:
                if str.startswith(s):
                    return True;
            return False;
        else:
            return True;
    return False
    
def splitResourceType(source):
    jsList = []
    cssList = []
    imgList = []  
    for s in source:
        if isImgType(s):
            imgList.append(s)
        elif isCssType(s):
            cssList.append(s)
        elif isJsType(s):
            jsList.append(s) 
        else: 
            print('什么类型也不是，解析资源出错！！！：', s)

    return jsList+cssList+imgList 

'''
下载文件
'''
def downloadFile(srcPath, distPath):
    print('\r','[下载][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),srcPath),end = '',flush = True);  
    global downloadedList  
    if distPath in downloadedList:
        print('\r',Fore.YELLOW+'[已下][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),srcPath),end = '',flush = True);  
        return
    #判断文件是否存在，如果已经存在不再进行重复下载
    if not Force and os.path.exists(distPath):
        print('\r',Fore.YELLOW+'[已下][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),srcPath),end = '',flush = True); 
        return 
    try:
        response = urllib.request.urlopen(srcPath)
        if response is None or response.status != 200:
            return print('> 请求异常：', srcPath)
        data = response.read()

        f = open(distPath, 'wb')
        f.write(data)
        f.close() 
        downloadedList.append(distPath) 
        print('\r',Fore.GREEN+'[完成][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),srcPath),end = '',flush = True); 
    except Exception as e:
        print('\r',Fore.RED+'[出错][ {} ]:{}:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),srcPath,e),end = '',flush = True); 
        # print('报错了：', e)


'''
解析路径
eg:
    basePath => F:\Programs\python\python-spider-downloads
    resourcePath => /a/b/c/ or a/b/c
    return => F:\Programs\python\python-spider-downloads\a\b\c
'''
def resolvePath(basePath, resourcePath):
    # 解析资源路径
    res = resourcePath.split('/')
    # 去掉空目录 /a/b/c/ => [a, b, c]
    dirList = list(filter(lambda x: x, res))
    # 目录不为空
    if dirList:
        # 拼接出绝对路径
        resourcePath = reduce(lambda x, y: os.path.join(x, y), dirList)
        dirStr = os.path.join(basePath, resourcePath)
    else:
        dirStr = basePath

    return dirStr
 
def analysSourse(urlDict,content): 
    global sourseList  
    contentList = re.split(r'\s+', content)
    resourceList = [] 
    changeList = [];    #需要替换的文件列表
    for line in contentList:
        resList = regResouce.findall(line)
        if resList is not None:
            resourceList = resourceList + resList 
    resourceList = splitResourceType(resourceList);
    for resourceUrl in resourceList:
        if resourceUrl.startswith('./'):
            resourceUrl = urlDict['fullPath'] + resourceUrl[1:]
            sourseList.append(resourceUrl);
        elif resourceUrl.startswith('//'):
            changeList.append(resourceUrl);
            resourceUrl = 'https:' + resourceUrl  
            sourseList.append(resourceUrl); 
        elif resourceUrl.startswith('/'):
            resourceUrl = urlDict['baseUrl'] + resourceUrl
            sourseList.append(resourceUrl);
        elif resourceUrl.startswith('http') or resourceUrl.startswith('https'):
            sourseList.append(resourceUrl);
            changeList.append(resourceUrl);
        elif not (resourceUrl.startswith('http') or resourceUrl.startswith('https')):
            # static/js/index.js这种情况 
            sourseList.append(urlDict['fullPath'] + resourceUrl);
        else:
            print('> 未知resource url: %s' % resourceUrl) 
    sourseList = list( set(sourseList));
    return changeList;

def analysHerfUrl(urlDict,soup,sourse): 
    elc = soup.find_all('a')

    # if urlDict['domain'] == '127.0.0.1:3101':
    #     urlDict['baseUrl'] = "https://www.w3cschool.cn"
    #     urlDict['fullPath'] = "https://www.w3cschool.cn/tensorflow_python/"
    #     urlDict['domain'] = "www.w3cschool.cn"


    for a in elc:
        if a.get("href"):
            hrefurl = a['href']
            if hrefurl.startswith('//'): #直接计算当前位置是否满足 
                fullUrl = urlDict['protocol'] +hrefurl[2:];
                addDownLoadHtml(fullUrl) 
                tmp = a.get('href')[:].split('/');
                if len(tmp)>2 and tmp[2] == urlDict['domain']:
                    a['href'] = ''
                    for i in range(3,len(tmp)):
                        a['href'] += '/'+tmp[i]
            elif hrefurl.startswith('/') and len(hrefurl)>1:
                fullUrl = urlDict['baseUrl']+hrefurl; 
                tmp = a.get('href')[:].split('/');
                if len(tmp)>2 and tmp[2] == urlDict['domain']:
                    a['href'] = ''
                    for i in range(3,len(tmp)):
                        a['href'] += '/'+tmp[i] 
                #修改路径   a['href'] = href.replace(url, "./");
                addDownLoadHtml(fullUrl) 
            elif hrefurl.startswith('.//') :
                fullUrl = urlDict['fullPath'][0:urlDict['fullPath'].rindex('/',0,len(urlDict['fullPath'])-1)]+hrefurl[2:]
                addDownLoadHtml(fullUrl) 
            elif hrefurl.startswith('https://') or hrefurl.startswith('http://'):
                fullUrl = hrefurl;
                addDownLoadHtml(fullUrl) 
                tmp = fullUrl[:].split('/');
                if len(tmp)>2 and tmp[2] == urlDict['domain']:
                    a['href'] = ''
                    for i in range(3,len(tmp)):
                        a['href'] += '/'+tmp[i]  
            elif hrefurl.startswith('../'):
                fullUrl = urllib.parse.urljoin(urlDict['fullPath'],hrefurl)
                addDownLoadHtml(fullUrl) 
    #开始使用替换连接内容
    
    if len(sourse) > 0: 
        matches = soup.find_all('script') + soup.find_all('link') + soup.find_all('img')
        for match in matches:
            if match.get('src') and not match.get('src').startswith('.'):
                # if match['src'] in sourse: 
                tmp = match.get('src')[:].split('/');
                if len(tmp)>2 and tmp[2] == urlDict['domain']:
                    match['src'] = ''
                    for i in range(3,len(tmp)):
                        match['src'] += '/'+tmp[i] 
                if match['src'].startswith('https://'): 
                    match['src'] = match['src'][7:]
                if match['src'].startswith('http://'):
                    match['src'] = match['src'][6:] 
                if match['src'].startswith('//'):
                    match['src'] = match['src'][1:] 
            elif match.get('href') and not match.get('href').startswith('.'):
                tmp = match.get('href')[:].split('/');
                if len(tmp)>2 and tmp[2] == urlDict['domain']:
                    match['href'] = ''
                    for i in range(3,len(tmp)):
                        match['href'] += '/'+tmp[i] 
                if match['href'].startswith('https://'): 
                    match['href'] = match['href'][7:]
                if match['href'].startswith('http://'):
                    match['href'] = match['href'][6:] 
                if match['href'].startswith('//'):
                    match['href'] = match['href'][1:] 
    
        # for s in sourse:
        #     matches = soup.find_all('script')
        #     for match in matches:
        #         if match.get('src'):
        #             print(match['src']);


def addDownLoadHtml(url):
    # print('input to download:'+url);
    isDown = False;
    for fix in downloadfix:
        if url.startswith(fix):
            isDown = True;
            break;
        isDown = False; 
    if not isDown:
        # print(url+'不在可下载域名内');
        return;
    global downloadurlList   
    if url not in downloadurlList:
        downloadurlList.append(url)
        # downloadurlList.sort()  #不应该排序
     
                    
#分析html内存，找到需要下载的资源
def analysisHtml2GetSourse(downloadUrl,logstr):
    global downloadedList  
    if downloadUrl in downloadedList:  
        print('\r',Fore.YELLOW+'[跳过][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadUrl),end = '',flush = True);        
        return True 
 
    downloadedList.append(downloadUrl)
    urlDict = parseUrl(downloadUrl)
    pageName = ''
    if urlDict['fileName'] is None:
        pageName = 'index.html'
    else:
        pageName = urlDict['fileName'] 
    distPath = os.path.join(resolvePath(SAVE_PATH, urlDict['path']), pageName)
    if not Force and os.path.exists(distPath):
        print('\r',Fore.YELLOW+'[跳过][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadUrl),end = '',flush = True);             
        return True 
    try:  
        webPage = urllib.request.urlopen(downloadUrl) 
        contents = webPage.read().decode('UTF-8')  
        print('\r','[读取][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadUrl),end = '',flush = True);             
        
        soup = BeautifulSoup(contents, 'lxml')
        # soup = BeautifulSoup(data.decode('UTF-8'), 'html.parser')  
        print('\r','[解析][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadUrl),end = '',flush = True);             
        sourselist = analysSourse(urlDict,contents)
        analysHerfUrl(urlDict,soup,sourselist)
 
        #开始解析文件
        # analysCss(soup)
        if not os.path.exists(resolvePath(SAVE_PATH, urlDict['path'])):
            os.makedirs(resolvePath(SAVE_PATH, urlDict['path']))

        f = open(distPath, 'wb') 
        f.write(str.encode(soup.prettify(),'utf-8'))
        f.close()
        print('\r',Fore.GREEN+'[完成][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadUrl),end = '',flush = True);                  
        return True 
    except Exception as e: 
        print('\r',Fore.RED+'[出错][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadUrl),end = '',flush = True);             
        return False 

def saveCSV(path,datas):
    file = open(path,'w',newline='')
    writer = csv.writer(file,dialect='excel') 
    writer.writerow(datas)
    # file = open(path,'w',newline='')
    # writer = csv.writer(file, delimiter=' ', quotechar=' ', quoting=csv.QUOTE_MINIMAL,dialect='excel')
    # for data in datas:
    #     writer.writerow(data)

def saveHtmlSourse2CSV(woidIdx):      
    
    file = open(os.path.join(SAVE_PATH,'taskidx.txt'),'w')
    file.write(str(woidIdx))
    file.close()
    # print("保存进度成功") 
    global htmlSourseLen
    global sourseLen

    if sourseLen != len(sourseList) and len(sourseList) > 0:
        sourseLen = len(sourseList)
        saveCSV(os.path.join(SAVE_PATH,'tasksourse.csv'),sourseList)
        

    if htmlSourseLen == len(downloadurlList):
        return
    htmlSourseLen = len(downloadurlList) 
    saveCSV(os.path.join(SAVE_PATH,'tasklist.csv'),downloadurlList) 
    #     writer.writerow(data) 

def isUrlIsDownLoad(url):
    urlDict = parseUrl(url)  
    if urlDict['fileName'] is None:
        pageName = 'index.html'
    else:
        pageName = urlDict['fileName'] 
    distPath = os.path.join(resolvePath(SAVE_PATH, urlDict['path']), pageName)
    if os.path.isfile(distPath):
        return True;
    return False;

 

def main(): 
    global SAVE_PATH
    # 首先创建这个站点的文件夹
 
    #终端颜色修改
    init(autoreset=True)
    urlDict = parseUrl(url) 
 
    print('分析的域名：', urlDict) 
    domain = urlDict['domain']

    filePath =  domain
    # 如果是192.168.1.1:8000等形式，变成192.168.1.1-8000，:不可以出现在文件名中
    filePath = re.sub(r':', '-', filePath)
    SAVE_PATH = os.path.join(SAVE_PATH, filePath)

    count = 0
    #判断之前任务是否完成
    if not Force and os.path.isfile(os.path.join(SAVE_PATH,'taskidx.txt')) and os.path.isfile(os.path.join(SAVE_PATH,'tasklist.csv')):
        #将资源解读出来
        file = open(os.path.join(SAVE_PATH,'taskidx.txt'),'r')
        count = int(file.read())
        file.close()
        global downloadurlList
        tasklist = open(os.path.join(SAVE_PATH,'tasklist.csv'),'r',newline='')
        reader = csv.reader(tasklist)
        for row in reader:
            for column in row:
                downloadurlList.append(column)
        if os.path.isfile(os.path.join(SAVE_PATH,'tasksourse.csv')):
            global sourseList
            tasksourse = open(os.path.join(SAVE_PATH,'tasksourse.csv'),'r',newline='')
            reader = csv.reader(tasksourse)
            for row in reader:
                for column in row:
                    sourseList.append(column) 
            # for row in reader:
            #     sourseList.append(row)
        # if Check :
        #     nums = 0
        #     for i in range(count - nums):
        #         if not isUrlIsDownLoad(downloadurlList[i]):
        #             dutl = downloadurlList.pop(i)
        #             downloadurlList.append(dutl)
        #             i -= 1
        #             nums += 1 
        #     count -= nums;   
        print('发现未完成的任务[ {:0>4d}/{} ],即将从未完成处开始'.format(count,len(downloadurlList)));  
    else:
        analysisHtml2GetSourse(url,'初始');  
        saveHtmlSourse2CSV(0)
    
    while count<len(downloadurlList):
    # for i in range(count,len(downloadurlList)): 
        logstr = '{:0>4d}/{:0>4d}'.format((count+1),len(downloadurlList))
        print('')
        print('\r','[下载][ {} ][ {} ]:{}'.format(time.strftime("%Y-%m-%d %H:%M:%S:", time.localtime()),logstr,downloadurlList[count]),end = '',flush = True); 
        if analysisHtml2GetSourse(downloadurlList[count],logstr):
            saveHtmlSourse2CSV(count)
        count +=1
    
    if not DownloadSourse:
        print('')
        print('-----------------全部页面完成------------------') 
        print('总共下载了%d个页面' % len(downloadurlList))
        print('跳过资源下载')
        return;
    
    print('')
    print('=====开始下载资源=======')
    for resourceUrl in sourseList:  
        # 解析文件，查看文件路径
        resourceUrlDict = parseUrl(resourceUrl)
        if resourceUrlDict is None:
            print('> 解析文件出错：%s' % resourceUrl)
            continue

        resourceDomain = resourceUrlDict['domain']
        resourcePath = resourceUrlDict['path']
        resourceName = resourceUrlDict['fileName']
        
        if resourceDomain != domain:
            # print('> 该资源不是本网站的，也下载：', resourceDomain)
            # 如果下载的话，根目录就要变了
            # 再创建一个目录，用于保存其他地方的资源
            resourceDomain =  re.sub(r':', '-', resourceDomain)
            savePath = os.path.join(SAVE_PATH, resourceDomain)
            if not os.path.exists(SAVE_PATH):
                # print('> 目标目录不存在，创建：', savePath)
                os.makedirs(savePath)
            # continue
        else:
            savePath = SAVE_PATH



        # # 解析资源路径
        dirStr = resolvePath(savePath, resourcePath)

        if not os.path.exists(dirStr):
            # print('> 目标目录不存在，创建：', dirStr)
            os.makedirs(dirStr)
        


        # 写入文件
        if resourceName!=None:
            downloadFile(resourceUrl, os.path.join(dirStr, resourceName))
        else:
            downloadFile(resourceUrl,dirStr)
        print('')
 

    print('-----------------下载完成------------------')
    print('总共下载了%d个资源' % len(downloadedList))


if __name__ == '__main__':
    main()
    pass