# coding:utf-8
import re, hashlib, time, urllib, httplib
from middle.bloomFilterSpider import bloomFilterSpider


class WechatSpider(bloomFilterSpider):
    '''Search a keyword on sogou's in-site wechat search (weixin.sogou.com).

    use case:
        a = WechatSpider("wechat")
        a.start("谢和平", 1)
    '''

    def start(self, key, page):
        """Fetch `page` result pages for the search word `key`.

        `key` is stored on self so extract() can tag each result with it.
        """
        self.key = key
        # Hoisted out of the loop: the query string is loop-invariant.
        query = urllib.urlencode({'query': key})
        # Sogou numbers result pages from 1. The previous range(page) started
        # at 0, requesting an invalid page 0 and skipping the last page.
        for i in range(1, page + 1):
            self.run('http://weixin.sogou.com/weixin?' + query + '&type=2&page=' + str(i))

    def extract(self, html):
        """Yield one dict per search hit.

        Keys: url, pubtime, site, title, content, searchword, md5.
        `html` is an lxml-style element tree of a result page.
        """
        for sel in html.xpath('//div[@class="wx-rb wx-rb3"]'):
            titleA = sel.xpath('div[@class="txt-box"]/h4/a')
            timeDiv = sel.xpath('div[@class="txt-box"]/div[@class="s-p"]')
            # Guard clause: skip malformed result nodes.
            if not (titleA and timeDiv):
                continue
            titleA = titleA[0]
            timeDiv = timeDiv[0]
            res = {}
            res["url"] = self.getOriginalUrl(titleA.attrib.get("href", ""))
            # Deduplicate via the bloom filter provided by the base class.
            if self.existUrl(res["url"]):
                continue
            res["pubtime"] = timeDiv.attrib.get("t", "0").encode("utf8")
            res["site"] = timeDiv.xpath("string(a/@title)").encode("utf8")
            res["title"] = self.getInnerText(titleA)
            res["content"] = self.opener.openurl(res["url"])
            res["md5"] = hashlib.md5(res["content"]).hexdigest()
            res["searchword"] = self.key
            yield res

    def getOriginalUrl(self, url):
        """Resolve a sogou redirect link to the real article url.

        Requests the redirect page and reads the Location header of the
        302 response. Returns `url` unchanged when no Location header is
        present. The connection is now always closed, even on error
        (the original leaked it if request()/getresponse() raised).
        """
        conn = httplib.HTTPConnection(self.getSiteByUrl(url))  # host part only
        try:
            conn.request('GET', self.getQueryByUrl(url))  # path + query; must be GET
            for name, value in conn.getresponse().getheaders():
                if name.lower() == 'location':
                    url = value
                    break
        finally:
            conn.close()
        return url

    def debug(self, code):
        """Debug hook for the fetched page source; intentionally a no-op."""
        pass
