#coding:utf-8
#博图列表页
from bs4 import BeautifulSoup
import requests
import os
import pymysql as mysql
import time
import math


import configparser

# Load the download root directory from config.ini ([fileroot] root = ...).
config = configparser.ConfigParser()
with open("config.ini", "r", encoding="utf-8") as cfgfile:
	# read_file() replaces readfp(), which is deprecated and removed in Python 3.12.
	config.read_file(cfgfile)
	ROOT = config.get("fileroot", "root")

# Route plain-HTTP traffic through the local proxy.
proxies = {
  "http": "192.168.30.176:8171",
}
# Browser-like request headers plus a fixed ASP.NET session cookie the site expects.
hdrs = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.108 Safari/537.36',
		'Cookie':'ASP.NET_SessionId=wnmlwinh2oyrcdq3nwuu3i45',
		'Host':'www.cnbooksearch.com',
		'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',}

def down(name, url, root):
	"""Fetch the category listing page for *name* and download every result page.

	name: single-letter category code (also used as the output sub-directory).
	url:  category listing URL (pagination links are read from it).
	root: base directory the downloaded pages are written under.
	"""
	try:
		r = requests.get(url, proxies=proxies, headers=hdrs, timeout=10)
	except requests.RequestException:
		# Network/timeout problem: log, back off, and skip this category
		# instead of crashing the whole crawl. (Was a bare except, which
		# also swallowed KeyboardInterrupt/SystemExit.)
		print(url)
		print("不可知的 错误")
		time.sleep(300)
		return
	soup = BeautifulSoup(r.content, 'lxml')
	div = soup.find('div', id="xulie2")
	if not div:
		# Pagination container missing — unexpected page layout; the
		# original code crashed with AttributeError here.
		print("网页错误")
		return
	all_page = div.find_all('a')
	# NOTE(review): the last two anchors appear to be next/last controls
	# rather than page numbers — confirm against the live markup.
	for page in range(len(all_page) - 2):
		url2 = url + '&author=&page=' + str(page)
		down_one(name, page, url2, root)
def down_one(name, page, url, root):
	"""Download one listing page and save it as <root>/<name>/<page>.html.

	Skips pages that already exist on disk (so the crawl is resumable) and
	refuses to save responses that lack the expected results container.
	"""
	file_path = os.path.join(root, name)
	if not os.path.exists(file_path):
		os.makedirs(file_path)
	file = os.path.join(file_path, str(page) + ".html")
	if os.path.exists(file):
		# Already downloaded — skip so re-runs only fetch missing pages.
		print("文件存在")
		return
	try:
		r = requests.get(url, proxies=proxies, headers=hdrs, timeout=10)
	except requests.RequestException:
		# Network/timeout problem: log, back off, and skip this page
		# instead of crashing the crawl. (Was a bare except.)
		print(url)
		print("不可知的 错误 2 ")
		time.sleep(300)
		return
	soup = BeautifulSoup(r.content, 'lxml')
	div = soup.find('div', class_="Cs_main")
	if not div:
		# Missing results container: likely an error page — do not save it.
		print("网页错误")
		return
	with open(file, 'wb') as f:
		f.write(r.content)
	print(name + ": 第" + str(page) + "下载成功")
if __name__ == "__main__":
	# Crawl the A–Z category listing pages into <ROOT>/20181012/list/.
	root = os.path.join(ROOT, "20181012", "list")
	if not os.path.exists(root):
		os.makedirs(root)
	base_url = "http://202.202.244.21:8088/Categories.aspx?name="
	# Was `list = ['A', ..., 'Z']` (shadowed the builtin) inside a dead
	# `for x in range(1)` wrapper with unused `cnt` bookkeeping.
	for name in [chr(c) for c in range(ord('A'), ord('Z') + 1)]:
		down(name, base_url + name, root)