#coding:UTF-8
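# Scrapes announcement list pages from the CSRC Beijing office site
# (www.csrc.gov.cn/pub/zjhpublicofbj), follows each linked detail page,
# and writes each announcement's title and body into an xlsx workbook
# (one workbook per list page). Python 2 only: urllib2/httplib.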

import urllib2
from bs4 import BeautifulSoup
import time
import sys
import xlsxwriter
import httplib
# Python 2 workaround so UTF-8 byte strings can be mixed and written
# without UnicodeDecodeError; neither needed nor possible on Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
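
# Fetch one list page, create a workbook for it, and scrape every linked
# announcement into its "content" worksheet (one row per announcement).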
def scrapyList(url, index):
    print url
    user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.82 Safari/537.36'
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url, headers=headers)
    content = ""
    try:
        # 'timeout' is not an HTTP header; pass it to urlopen instead
        response = urllib2.urlopen(request, timeout=60)
    except (IOError, httplib.HTTPException, httplib.BadStatusLine) as e:
        print url + ' list page request failed:', e
        time.sleep(10)
        scrapyList(url, index)  # retry with the same arguments
    else:
        content = response.read()
        soup = BeautifulSoup(content, 'lxml')
        c = soup.find(attrs={"id": "documentContainer"})
        workbook = xlsxwriter.Workbook("/Users/wangyifei/Documents/zq/zq"+str(index)+".xlsx")
        # bold, centered cell format for a header row (defined but not applied below)
        header_style = workbook.add_format({'bold': True})
        header_style.set_bg_color("white")
        header_style.set_align('center')
        header_style.set_color("black")
        worksheet = workbook.add_worksheet("content")
        book_a = c.find_all(attrs={"class": "row"})
        i = 0
        for div in book_a:
            # div is already a parsed Tag; no need to re-parse it with BeautifulSoup
            a = div.find(attrs={"target": "_blank"})
            scrapyContent("http://www.csrc.gov.cn/pub/zjhpublicofbj" + a["href"].replace("..", ""), i, workbook, worksheet)
            i = i + 1
            time.sleep(4)
        # close once after all rows are written; closing inside the loop
        # finalizes the file after the first announcement
        workbook.close()


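# Fetch one announcement detail page and write its title (column 0) and
# body (column 1) into row `index` of the shared worksheet.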
def scrapyContent(url, index, workbook, worksheet):
    print url
    user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.82 Safari/537.36'
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url, headers=headers)
    try:
        response = urllib2.urlopen(request, timeout=60)
    except (IOError, httplib.HTTPException, httplib.BadStatusLine) as e:
        print url + ' content page request failed:', e
        time.sleep(10)
        scrapyContent(url, index, workbook, worksheet)  # retry with the same arguments
    else:
        content = response.read()
        # print content
        soup = BeautifulSoup(content, 'lxml')
        c = soup.select('#lTitle')

        # column 0: announcement title (avoid shadowing the built-in str)
        title_text = ""
        for t in c:
            title_text = title_text + t.get_text().encode('utf-8') + "\n"

        worksheet.write(index, 0, title_text)

        # column 1: announcement body paragraphs
        cc = soup.select('#ContentRegion > div > p')
        body_text = ""
        for tt in cc:
            body_text = body_text + tt.get_text().encode('utf-8') + "\n"
        worksheet.write(index, 1, body_text)


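# crawl list pages 1 through 146; each list page gets its own workbook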
for i in range(1, 147):
    scrapyList('http://www.csrc.gov.cn/pub/zjhpublicofbj/2280/index_887_' + str(i) + '.htm', i)
    time.sleep(6)