#coding:UTF-8

import urllib2
from bs4 import BeautifulSoup
import time
import sys
import xlsxwriter
import httplib
import logging
# Force the process-wide default encoding to UTF-8 so the Chinese page text
# scraped below can be concatenated/written without per-call .encode() calls
# (a common Python 2 workaround; requires the reload() to re-expose the API).
reload(sys)
sys.setdefaultencoding('utf8')
# Log everything (DEBUG and up) to a fixed file, truncating it on each run.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='E:\zj\zj.log',
                    filemode='w')
ls_not = []    # "url,index" entries for list pages missing the document container
mapping = {}   # retry counter per list URL, keyed by str(hash(url))
exc_count = 0  # NOTE(review): never referenced in this file — possibly dead

def scrapyList(url,index):
    print url
    user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.82 Safari/537.36'
    headers = {'User-Agent': user_agent, 'timeout': 60}
    request = urllib2.Request(url, headers=headers)
    content =""
    try:
        response = urllib2.urlopen(request)
    except (IOError, httplib.HTTPException, httplib.BadStatusLine) as e:
        print url + '列表页面发生异常', e
        print mapping

        if not mapping.has_key(str(hash(url))):
            mapping[str(hash(url))] = 0
        else:
            mapping[str(hash(url))] = mapping[str(hash(url))] + 1

        if mapping[str(hash(url))] > 10:
            logging.info(url+" 404 ")
            return
        time.sleep(10)
        scrapyList(url,index)
    else:
        content = response.read()
        soup = BeautifulSoup(content, 'lxml')
        c = soup.find(attrs={"id": "documentContainer"})
        workbook = xlsxwriter.Workbook("E:\zj\zj"+str(index)+".xlsx")
        header_style = workbook.add_format({'bold': True})
        header_style.set_bg_color("white")
        header_style.set_align('center')
        header_style.set_color("black")
        worksheet = workbook.add_worksheet("content")
        if c is None:
            ls_not.append(url+","+str(index))
            logging.info(url +" not find " + content)
            return
        book_a = c.find_all(attrs={"class": "row"})
        i = 0
        for div in book_a:
            s1 = BeautifulSoup(str(div), 'lxml')
            a = s1.find_all(attrs={"target": "_blank"})
            for aa in a:
                scrapyContent("http://www.csrc.gov.cn/pub/zjhpublic/" + aa["href"].replace("../../", ""),i,workbook,worksheet)
            i = i +1
            time.sleep(4)
        workbook.close()

def scrapyContent(url,index,workbook,worksheet):
    print url
    user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.82 Safari/537.36'
    headers = {'User-Agent': user_agent, 'timeout': 60}
    request = urllib2.Request(url , headers=headers)
    try:
        response = urllib2.urlopen(request)
    except (IOError, httplib.HTTPException, httplib.BadStatusLine) as e:
        print url + '列表页面发生异常', e
        time.sleep(10)
        scrapyContent(url,index,workbook,worksheet)
    else:
        content = response.read()
        # print content
        soup = BeautifulSoup(content, 'lxml')
        c = soup.select('#lTitle')

        print index
        str = ""
        for t in c:
            str = str + t.get_text().encode('utf-8')+ "\n"

        worksheet.write(index, 0,str )
        try:
            if str == '':
                logging.info(url +" not find title")
                raise Exception(url+" not find content")
        except (Exception) as e:
            print url + '列表页面发生异常', e
            time.sleep(10)
            scrapyContent(url,index,workbook,worksheet)
        else:
            worksheet.write(index, 0,str)
            cc = soup.select('#ContentRegion p')
            str = ""
            for tt in cc:
                str = str +tt.get_text() + "\n"
            try:
                if '' == str:
                    logging.info(url+" not find content")
                    raise Exception(url+" not find content")
            except (Exception) as e:
                print url + '列表页面发生异常', e
                time.sleep(10)
                scrapyContent(url,index,workbook,worksheet)
            else:
                worksheet.write(index, 1,str)

# Entry point: walk the paginated index (page 0 carries no "_<n>" suffix;
# every later page appends "_<page>" to the base file name).
for page in range(53, 54):
    suffix = '' if page == 0 else '_' + str(page)
    list_url = 'http://www.csrc.gov.cn/pub/zjhpublic/3300/3313/index_7401' + suffix + '.htm'
    scrapyList(list_url, page)
    time.sleep(6)  # pause between list pages