from bs4 import BeautifulSoup
from urllib import request,parse
import requests as req
import re
import random
import os

# Root of the site being scraped; all relative hrefs found in pages are
# joined onto this to form absolute URLs.
base_url = 'http://download.kaoyan.com'

# Pool of browser User-Agent strings; one is chosen at random per request
# (see build_random_headers) to vary the scraper's client fingerprint.
USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5"
]

def build_random_headers():
    """Build a request-header dict carrying a randomly chosen User-Agent.

    Picking a fresh agent string from USER_AGENTS on every call varies the
    scraper's client fingerprint across requests.

    Returns:
        dict: headers with a single 'User-Agent' entry.
    """
    return {'User-Agent': random.choice(USER_AGENTS)}

# Local root directory that downloaded PDFs are written under.
# NOTE(review): "F:data/" is a drive-relative Windows path (it resolves
# against the current working directory of drive F:) — confirm whether
# "F:/data/" was intended.
save_base_path = "F:data/"

def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Surrounding whitespace and any trailing backslash are stripped first,
    matching how callers assemble the path string.

    Args:
        path: directory path to create.

    Returns:
        bool: True if the directory was created, False if it already existed.
    """
    path = path.strip().rstrip("\\")
    try:
        # EAFP: attempting the creation directly avoids the check-then-create
        # race the original exists()/makedirs() pair had.
        os.makedirs(path)
        return True
    except FileExistsError:
        return False
		
def downloadPdf(file_name, save_path, url):
	"""Download the PDF at *url* to save_base_path/save_path/file_name.pdf.

	The destination directory is created on demand. The response body is
	streamed to disk in ~100 KB chunks so large files never sit fully in
	memory.

	Args:
		file_name: base name (without extension) for the saved file.
		save_path: sub-directory under save_base_path, expected to end with '/'.
		url: absolute URL of the PDF.

	Raises:
		requests.HTTPError: if the server responds with an error status,
			instead of silently saving the error page as a ".pdf".
	"""
	path = save_base_path + save_path
	mkdir(path)
	# stream=True is required for true chunked download: without it requests
	# buffers the entire body before iter_content() ever yields. The context
	# manager guarantees the connection is released.
	with req.get(url, headers=build_random_headers(), stream=True) as response:
		response.raise_for_status()
		with open(path + file_name + ".pdf", "wb") as outputPdfFile:
			for chunk in response.iter_content(100000):
				outputPdfFile.write(chunk)

# Main crawl loop: walk category lists 31..40, 10 pages each, follow each
# row to its detail page, resolve the attachment's download form, and save
# the PDF it points at.
start = 31
end = 40
while start <= end:
    headers = build_random_headers()
    page = 1
    while page <= 10:
        try:
            # List-page URL, e.g. http://download.kaoyan.com/list-31p2.
            # Note: no request body — the original sent an empty urlencoded
            # payload with an explicit GET, which is contradictory.
            url = base_url + '/list-' + str(start) + 'p' + str(page)
            req_list = request.Request(url=url, headers=headers, method="GET")
            response_list = request.urlopen(req_list)
            list_soup = BeautifulSoup(response_list.read(), "html.parser")
            for item in list_soup.select('tr'):
                name = item.text.strip()
                detail_href_value = item.find('a')['href']

                # Detail page carries the attachment list.
                detail_url = base_url + detail_href_value
                response_detail = request.urlopen(detail_url)
                detail_html = response_detail.read()
                detail_soup = BeautifulSoup(detail_html, "html.parser")

                # First attachment link leads to the download page, whose
                # <form action> holds the real PDF URL.
                download_href_value = detail_soup.select('dl.t_attachlist')[0].find('a')['href']
                download_url = base_url + download_href_value
                response_download = request.urlopen(download_url)
                download_html = response_download.read()
                download_soup = BeautifulSoup(download_html, "html.parser")
                pdf_url = download_soup.select('form')[0]['action']

                # Save under a per-category sub-directory.
                save_path = 'list-' + str(start) + '/'
                try:
                    downloadPdf(name, save_path, pdf_url)
                    print("下载成功:", name)
                except Exception as e:
                    # Exception, not BaseException: keeps Ctrl-C able to
                    # abort the crawl instead of being swallowed here.
                    print("下载失败:", e)
                    continue
        except Exception as e:
            # Best-effort: log the page-level failure and move on.
            print(e)
        finally:
            page = page + 1
    start = start + 1
