#coding:utf-8
import re,hashlib,time,urllib,httplib
from middle.bloomFilterSpider import bloomFilterSpider

class BaiduSiteSpider(bloomFilterSpider):
    '''
    Crawl Baidu site-restricted keyword search ("site:(<host>) <keyword>").

    use case:
    a = BaiduSiteSpider("baidu")
    a.start("www.scu.edu.cn", "谢和平", 1)
    '''
    def start(self, site, key, page):
        """Fetch up to `page` result pages of a Baidu search restricted to `site`.

        site: host (or URL) the search is restricted to; stored on self.site
        key:  search keyword; stored on self.key
        page: number of result pages to fetch (Baidu paginates 10 hits/page)
        """
        self.site = site
        host = self.getSiteByUrl(site)  # normalize to bare host for the site: operator
        self.key = key
        params = {'wd': "site:(" + host + ") " + key}
        for i in range(page):
            # pn is the result offset (10 results per page)
            self.run('http://www.baidu.com/s?' + urllib.urlencode(params) + '&pn=' + str(i * 10))

    def extract(self, html):
        """Yield one result dict per search hit not yet seen by the bloom filter.

        html: an lxml-style tree (supports .xpath) of a Baidu results page.
        Yields dicts with keys: url, pubtime, site, title, content, searchword, md5.
        """
        for sel in html.xpath('//div[@class="result c-container "]'):
            label = sel.xpath('h3/a')
            if not label:
                continue
            label = label[0]
            res = {}
            # Baidu links are redirects; resolve to the real target first.
            res["url"] = self.getOriginalUrl(label.attrib.get("href", ""))
            if self.existUrl(res["url"]):  # already crawled — skip
                continue
            res["pubtime"] = self.getPubtime(label)
            res["site"] = self.site
            res["title"] = self.getInnerText(label)
            res["content"] = self.opener.openurl(res["url"])
            res["md5"] = hashlib.md5(res["content"]).hexdigest()
            res["searchword"] = self.key
            yield res

    def getOriginalUrl(self, url):
        """Resolve a Baidu redirect link to its real target URL.

        Result links point at a Baidu redirector whose 302 response carries
        the target in its Location header. Returns `url` unchanged when no
        Location header is found. The connection is always closed, even if
        the request raises.
        """
        conn = httplib.HTTPConnection(self.getSiteByUrl(url))  # host part of the url
        try:
            conn.request('GET', self.getQueryByUrl(url))  # path + query part
            for name, value in conn.getresponse().getheaders():
                if name.lower() == 'location':
                    url = value
                    break
        finally:
            conn.close()
        return url

    def getPubtime(self, label):
        """Extract the publish timestamp embedded in the result's data-click attr.

        Baidu stores 'T':'<epoch seconds>' inside data-click; returns the
        digits as a string, or "0" when the attribute or pattern is absent.
        """
        match = re.search(r"'T':'(\d+)'", label.attrib.get('data-click', ''))
        return match.group(1) if match else "0"