#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------
# Author:    LJ
# Email:     admin@attacker.club

# Date:      2018/4/7
# Description:
# --------------------------------------------------


# detail_page = path + "/" + line.split("/")[-2]
# internal_url = url_line.split("/")[-2]
# By default use index -1; when the URL ends with "/" use -2.


# --------------------------------------------------


import os,re
import requests
import time

from urllib.parse import urljoin,urlparse
# urljoin: resolve a relative path against a base URL into an absolute URL

import time

# End of imports.


headers = {"Accept": "text/html,application/xhtml+xml,application/xml;",
           "Accept-Encoding": "gzip",
           "Accept-Language": "zh-CN,zh;q=0.8",
           "Referer": "https://www.zmrenwu.com/",
           "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36"
           }


# Fetch the content of a URL (with CSS links rewritten to /static/).
def req_text(url):
    """Fetch *url* and return its HTML with CSS hrefs rewritten to /static/<name>.

    Every href="..." value containing ".css" is replaced by
    '/static/' + its final path component, so the mirrored copy serves
    stylesheets from the local static directory.
    """
    # FIX: the module defines a browser-like `headers` dict that was never
    # sent; also bound the wait so a dead server cannot hang the crawl.
    req = requests.get(url, headers=headers, timeout=10)
    req.encoding = 'utf-8'
    text = req.text

    allurls = re.findall(r'href="([^"]+)"', text)
    HTML = text
    for url_line in allurls:
        if url_line.find(".css") > -1:
            # Keep only the file name; it will be saved flat under static/.
            css_name = url_line.split("/")[-1]
            HTML = HTML.replace(url_line, '/static/' + css_name)
    return HTML



# Save a fetched resource to disk, mirroring the site layout.
def SaveHTML(url, HTML):
    """Write *HTML* to a local path derived from *url*.

    CSS files go to static/<name>; pages under the site's ``root_url``
    keep their URL path relative to the current directory.  URLs matching
    neither pattern are ignored.  Existing files are never overwritten.
    """
    # --- derive target directory and file name ---------------------------
    if url.find(".css") > -1:
        # Stylesheets are collected flat under static/ (matches the
        # '/static/' rewrite done in req_text).
        getdir = "static/"
        getfile = getdir + url.split("/")[-1]
    elif url.find(root_url) > -1:
        path = urlparse(url).path  # URL path component, e.g. "/posts/1/"
        m = re.findall(r'(\w+.*)', path)  # strip the leading slash
        if not m:
            return  # site root "/": nothing sensible to save
        getfile = m[0]
        dirs = re.findall(r'(.*)\/', getfile)
        # "." (current dir, always exists) when the file has no directory
        # part; the original left a stale global here, which could write
        # into whatever directory the previous call used.
        getdir = dirs[0] if dirs else "."
    else:
        # Foreign URL: not part of the mirror.  The original fell through
        # with `getfile` unbound (NameError) here.
        return

    # --- create directory, then write -----------------------------------
    # BUG FIX: the original chained these with if/elif, so whenever the
    # directory had just been created the file write was silently skipped.
    if not os.path.exists(getdir):
        os.makedirs(getdir)
    if not os.path.exists(getfile):
        # Content was decoded as UTF-8, so write it back as UTF-8 instead
        # of relying on the platform default encoding.
        with open(getfile, 'w', encoding='utf-8') as f:
            f.write(HTML)















if __name__ == "__main__":
    root_url = "http://d.nf1.cc/"  # site root to mirror
    # FIX: send the shared browser-like `headers` (defined at module level
    # but previously never used) and bound the wait with a timeout.
    index = requests.get(root_url, headers=headers, timeout=10)
    index.encoding = 'utf-8'

    # All links on the index page.
    all_urls = re.findall(r'href="([^"]+)"', index.text)

    # Counts both saved pages and every link inspected on them
    # (so it is larger than the number of files written).
    count = 0
    for url in all_urls:
        # Only follow site-relative links; absolute http(s) links are
        # treated as external and skipped.
        if url.find('http') == -1:
            fullurl = urljoin(root_url, url)  # absolute URL of the page

            TEXT = req_text(fullurl)
            SaveHTML(fullurl, TEXT)
            count += 1

            # Second level: links found inside the page just saved.
            index_urls = re.findall(r'href="([^"]+)"', TEXT)
            for index_url in index_urls:
                print("单url:", index_url)
                index_fullurl = urljoin(fullurl, index_url)
                count += 1

                # Follow only in-site, non-CSS links (CSS files were
                # already rewritten and saved via the static/ branch).
                if index_fullurl.find(root_url) > -1 and index_fullurl.find("css") == -1:
                    TEXT = req_text(index_fullurl)
                    SaveHTML(index_fullurl, TEXT)

    print("\n\033[31m 下载完毕 !!!\033[0m")
    print(count)

