# coding: utf-8
import urllib
import urllib2
import cookielib
from HTMLParser import HTMLParser
import re
import xlrd
import xlwt
import string
from urllib2 import Request, urlopen, URLError, HTTPError
import requests


#======================================================
class myHtmlParser(HTMLParser):
    """HTML parser that harvests news-detail links and/or text fragments.

    Caller-set flags (assign on the instance after construction):
      irstart      -- 1 enables link collection in handle_starttag
      irend        -- 1 enables text collection in handle_data
      isfinids     -- 1 restricts collected links to
                      '/newsAction.do?method=viewNews&classId...' URLs,
                      minus two hard-coded known-bad URLs
      iscleasspace -- 1 strips CR/LF and space characters from text

    Collected output:
      result -- list of href strings
      data   -- list of stripped text fragments
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.flag = None
        # BUG FIX: these used to be mutable *class* attributes, shared by
        # every instance -- each new per-page parser kept appending to the
        # same lists, so results accumulated across all pages.  They are
        # now per-instance state.
        self.irstart = 0
        self.irend = 0
        self.isfinids = 0
        self.iscleasspace = 0
        self.result = []
        self.data = []

    def handle_starttag(self, tag, attrs):
        # Collect href values of <a> tags when link collection is enabled.
        if self.irstart == 1:
            if tag == 'a':
                self.flag = 'a'
                for name, value in attrs:
                    if name == 'href':
                        if self.isfinids == 1:
                            # Skip two specific known URLs, then keep only
                            # news-detail links.
                            if value != '/newsAction.do?method=viewNews&classId=020019980000000472&newsId=020010040000016998' and value != '/newsAction.do?method=viewNews&classId=020010350000000877&newsId=020010040000108745':
                                if value.find('/newsAction.do?method=viewNews&classId') >= 0:
                                    self.result.append(value)
                        else:
                            self.result.append(value)

    def handle_data(self, data):
        # Collect non-empty text fragments when text collection is enabled.
        if self.irend == 1:
            sdata = data.strip()
            if sdata != '':
                if self.iscleasspace == 1:
                    # Remove CR/LF plus both space variants present in the
                    # scraped pages (full-width and ASCII space).
                    sdata = sdata.replace("\r\n", "")
                    sdata = sdata.replace(" ", "")
                    sdata = sdata.replace(' ', '')
                self.data.append(sdata)

#======================================================
#http://www.gdlr.gov.cn/newsAction.do?method=viewNews&classId=020010350000000680&newsId=020010040000205418
# Crawl all 463 result pages of the land-news search listing, POSTing the
# search form for each page and collecting news-detail URLs via
# myHtmlParser into fdcinfolist.
fdcinfolist=[]
findpage = 1
while findpage<=463:
    # Page 286 is hard-coded to be skipped -- presumably it failed during
    # an earlier run (see the "#286" note below the loop); confirm.
    if findpage == 286:
        findpage+=1
        continue
    print str(findpage)+"获取所有id"
    # Fresh cookie jar + opener per page request.
    cookie = cookielib.CookieJar()
    opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
    url='http://www.gdlr.gov.cn/newsAction.do?method=searchNews'
    # Search title ("Shunde district"), encoded for the site's GB2312 form.
    tilename = u'顺德区'
    tilename = tilename.encode("gb2312")
    # POST form fields replicating the site's paginated search request;
    # gotoPage selects the page, the totals/counts mirror the site's own
    # hidden fields.
    values={
        'classId':'null',
        'textGeneralType':'020010350000000680',
        'newsCatalogID':'020010350000000680',
        'isquery':'',
        'type':'',
        'type_name':'',
        # NOTE(review): duplicate dict key 'oldTitle' -- the first value
        # (tilename) is silently discarded and '' is sent; confirm which
        # value was intended.
        'oldTitle':tilename,'oldTitle':'','titleName':tilename,'linkUrl':'',
        '020010350000000680':'','strTime':'','endTime':'','orderSign':'0','curPageNo':'1',
        'totalCnts':'4630','totalPages':'463','cntPerPage':'10','SplitFlag':'1','orderBy':'null','descOrAsc':'desc','gotoPage':findpage}
    data = urllib.urlencode(values)
    req = urllib2.Request(url,data)
    # Browser-like headers so the server accepts the request.
    req.add_header('Host','www.gdlr.gov.cn')
    req.add_header('Referer','http://www.gdlr.gov.cn/newsAction.do?method=searchNews&textGeneralType=020010350000000680')
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.3; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0')
    response = opener.open(req)
    the_page=response.read()
    # Server responds in GB2312; re-encode to UTF-8 for the parser,
    # ignoring undecodable bytes.
    pstrl = unicode(the_page,"gb2312",'ignore').encode("utf8")
    response.close()
    # Parse the page and extract only news-detail links (isfinids=1).
    m= myHtmlParser()
    m.irstart=1
    m.isfinids=1
    m.feed(pstrl)
    m.close()
    result = m.result
    for itemstr in result:
        fdcinfolist.append(itemstr)
    findpage+=1
# NOTE: page 286 is skipped in the id-collection loop above
# Build the output spreadsheet
f= xlwt.Workbook()#创建工作簿
sheetname = "sheet1"
sheet = f.add_sheet(sheetname,cell_overwrite_ok=True)
#行头
print "创建表格"
sinlindx = 0
print "长度:"+str(len(fdcinfolist))

print "删除重复前:"+str(len(fdcinfolist))
fdcinfolist =  list(set(fdcinfolist))
print "删除重复后:"+str(len(fdcinfolist))

while sinlindx<len(fdcinfolist):
    sheet.write(sinlindx,0,fdcinfolist[sinlindx])
    sinlindx+=1

f.save( u'顺德土地信息(结果出让公告)网址列表.xls')