from threading import Thread
from queue import Queue
from bs4 import BeautifulSoup
from urllib import request,parse
import requests as req
import re
import random
import os
import time

# Root of the site being scraped; relative hrefs from listing/detail pages are joined onto this.
base_url = 'http://download.kaoyan.com'

# Pool of desktop-browser User-Agent strings; build_random_headers() picks one at
# random per request so successive requests don't all carry the same client fingerprint.
USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5"
]

def build_random_headers():
	"""Return an HTTP header dict whose User-Agent is drawn at random from USER_AGENTS."""
	return {'User-Agent': random.choice(USER_AGENTS)}

# Base directory under which per-list subfolders of PDFs are created.
# NOTE(review): "F:test/" is a drive-relative Windows path (it resolves against
# whatever the current directory on drive F: happens to be) — probably meant
# "F:/test/"; confirm before deploying.
save_base_path = "F:test/"

def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Leading/trailing whitespace and trailing backslashes are stripped first.
    Returns True when the directory was created, False when it already existed.
    """
    path = path.strip().rstrip("\\")
    if os.path.exists(path):
        return False
    # exist_ok=True closes the TOCTOU race: another thread may create the
    # directory between the exists() check above and this call.
    os.makedirs(path, exist_ok=True)
    return True
		
def downloadPdf(file_name, save_path, url):
	"""Download the PDF at *url* into save_base_path/save_path/file_name.pdf.

	*save_path* is expected to end with '/' (callers pass e.g. 'list-51/').
	Raises requests.HTTPError on a non-2xx response.
	"""
	# stream=True so iter_content actually streams instead of buffering the
	# whole file in memory; timeout so a stalled server can't hang the thread.
	response = req.get(url, headers=build_random_headers(), stream=True, timeout=30)
	# Fail fast rather than silently saving an HTML error page as a ".pdf".
	response.raise_for_status()
	directory = save_base_path + save_path
	mkdir(directory)
	with open(directory + file_name + ".pdf", "wb") as pdf_file:
		for chunk in response.iter_content(100000):
			pdf_file.write(chunk)

class KaoYanBangSpider(Thread):
	"""Worker thread that downloads every attachment PDF linked from one listing page."""

	def __init__(self, url, data, headers, idx):
		super(KaoYanBangSpider, self).__init__()
		self.url = url          # listing page URL to crawl
		self.data = data        # request body (empty for GET; kept for compatibility)
		self.headers = headers  # HTTP headers, including a randomized User-Agent
		self.idx = idx          # list id; used to build the per-list save folder

	def run(self):
		self.parse_page()

	def parse_page(self):
		"""Fetch the listing page, follow each row to its attachment and download it."""
		req_list = request.Request(url=self.url, data=self.data, headers=self.headers, method="GET")
		response_list = request.urlopen(req_list)
		list_soup = BeautifulSoup(response_list.read(), "html.parser")
		save_path = 'list-' + str(self.idx) + '/'
		for item in list_soup.select('tr'):
			# The whole per-row pipeline lives in one try: previously only the
			# final download was guarded, so a malformed row (e.g. a header <tr>
			# with no <a>, where find('a') returns None) killed the thread.
			try:
				link = item.find('a')
				if link is None or not link.get('href'):
					continue  # header/separator rows carry no attachment link
				# Row text becomes the file name; strip characters that are
				# illegal in Windows paths (and collapse embedded whitespace).
				name = re.sub(r'[\\/:*?"<>|\s]+', ' ', item.text).strip()

				detail_url = base_url + link['href']
				detail_soup = BeautifulSoup(request.urlopen(detail_url).read(), "html.parser")

				download_href_value = detail_soup.select('dl.t_attachlist')[0].find('a')['href']
				download_soup = BeautifulSoup(request.urlopen(base_url + download_href_value).read(), "html.parser")
				pdf_url = download_soup.select('form')[0]['action']

				downloadPdf(name, save_path, pdf_url)
				print("下载成功:", name)
			except Exception as e:
				# Exception, not BaseException: don't swallow KeyboardInterrupt/SystemExit.
				print("下载失败:", e)
				continue

def main():
	"""Spawn one spider thread per listing page (lists 51-60, pages 1-10) and wait for all."""
	# Empty urlencoded body for every GET request — loop-invariant, build it once.
	# (Previously rebuilt per page, via a local that shadowed the builtin `dict`.)
	payload = bytes(parse.urlencode({}), encoding="utf8")

	threads = []
	for idx in range(51, 61):
		for page in range(1, 11):
			url = base_url + '/list-' + str(idx) + 'p' + str(page)
			spider = KaoYanBangSpider(url, payload, build_random_headers(), str(idx))
			spider.start()
			threads.append(spider)

	# Block the main thread until every worker has finished.
	for worker in threads:
		worker.join()

if __name__ == "__main__":
	start = time.time()
	main()
	# Report total wall-clock time for the whole crawl.
	end = time.time()
	print('耗时：%s' % (end - start))
        

