#!/usr/bin/env python3
# Author: fonzie

import os
import platform
import shutil
import socket
from urllib.parse import urlsplit

import chardet
import requests
from bs4 import BeautifulSoup
from ping3 import ping
from unrar import rarfile
# Shared HTTP request headers: a desktop-Chrome User-Agent so the site
# serves the normal page instead of rejecting a bare script client.
headers = {
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"
    }

def html(url):
    """Fetch *url* and return its body decoded to text.

    The charset is detected with chardet; gbk (the site's historical
    encoding) is the fallback when detection fails.

    url: page URL to fetch.
    Returns: decoded HTML text.
    """
    # Reuse the module-level `headers` instead of duplicating the dict here.
    response = requests.get(url, headers=headers)
    raw_html = response.content
    # Bug fix: the old code ran chardet but then ignored the result and
    # hard-coded 'gbk'. Actually honor the detected encoding, and don't
    # crash on the occasional malformed byte.
    encoding = chardet.detect(raw_html)['encoding'] or 'gbk'
    return raw_html.decode(encoding, errors='replace')

class analyze(object):
    """Parse a jb51.net book page: extract metadata and mirror links,
    pick the fastest mirror by ping, download the archive and unpack it."""

    def __init__(self, content):
        """Build a BeautifulSoup tree from the raw page HTML.

        content: HTML text of the book page (as returned by html()).
        """
        self.soup = BeautifulSoup(content, 'html5lib')

    def description(self):
        """Return the <meta name="description"> content, or None if the
        tag (or its content attribute) is missing."""
        # find() replaces find_all(...)[0], which raised an opaque
        # IndexError when the tag was absent.
        tag = self.soup.find(attrs={'name': 'description'})
        if tag is None:
            return None
        return tag.attrs.get('content')

    def detail(self):
        """Return the element with id 'soft-intro' (the book's intro
        block), or None if it is not present."""
        return self.soup.find(attrs={'id': 'soft-intro'})

    def downloads_url(self):
        """Collect the mirror download links from every ul.ul_Address
        list, skipping pan.baidu.com links (they require a client/login
        and cannot be fetched directly). Returns a list of URL strings."""
        downloads_url = []
        for ul in self.soup.find_all('ul', {'class': 'ul_Address'}):
            # Bug fix: iterating li.contents directly crashed with an
            # AttributeError on NavigableString children; ask for the
            # anchor tags explicitly instead.
            for a in ul.find_all('a'):
                u = a.get('href')
                if not u or u.startswith('https://pan.baidu.com'):
                    continue
                downloads_url.append(u)
        print('文件的下载地址:\n', '\n'.join(downloads_url))
        return downloads_url

    def Delay(self, urls):
        """Ping each mirror's host and return the URL with the lowest
        round-trip time, or None if every host failed/timed out.

        urls: iterable of URL strings.
        """
        best_rtt = -1
        best_url = None
        for url in urls:
            # Bug fix: the old manual url.split('/')[2].split(':')[0]
            # stripped the port first, leaving the port-parsing branch
            # below it dead; urlsplit handles host:port correctly.
            host = urlsplit(url).hostname
            if not host:
                continue
            try:
                print('检测域名延迟:', host)
                rtt = ping(host, timeout=2)
            except Exception as e:
                print(host + "超时")
                print(e)
                continue
            # ping3 returns None (or False) on timeout/unreachable.
            if rtt:
                if best_rtt == -1 or rtt <= best_rtt:
                    best_rtt = rtt
                    best_url = url
        return best_url

    def download_file(self, url, path=r".\files"):
        """Download the archive at *url* into *path*, then unpack it
        into ./pdf. Returns False for a falsy url, otherwise None."""
        print('使用url下载:', url)
        if not url:
            print('无效的url')
            return False

        print('开始下载文件:', url)
        # NOTE(review): verify=False disables TLS certificate checking;
        # kept for mirror compatibility, but worth revisiting.
        r = requests.get(url, headers=headers, verify=False)
        os.makedirs(path, exist_ok=True)

        # The archive is named after the last URL path segment.
        file_name = url.split('/')[-1]
        srcpath = os.path.join(path, file_name)
        with open(srcpath, 'wb') as f:
            f.write(r.content)

        self.unrar(srcpath, './pdf')

    def unrar(self, srcpath, dstpath='./pdf'):
        """Extract every member of the RAR archive at srcpath into
        dstpath, creating dstpath if needed."""
        os.makedirs(dstpath, exist_ok=True)
        print('解压文件:', srcpath)
        rarfile.RarFile(srcpath).extractall(dstpath)

if __name__ == '__main__':
    # Fetch the book page, list its mirrors, pick the fastest one by
    # ping, then download and unpack the archive.
    page = html('https://www.jb51.net/books/40386.html')
    parser = analyze(page)
    mirrors = parser.downloads_url()
    fastest = parser.Delay(mirrors)
    parser.download_file(fastest)