# -*- coding: utf-8 -*-
import scrapy
from pyquery import PyQuery as pq


class WxspiderSpider(scrapy.Spider):
    """Crawl Sogou's WeChat article search for a fixed keyword.

    Walks every result page of ``weixin.sogou.com``, follows each article
    link to ``mp.weixin.qq.com`` and yields a dict per article with its
    url, title, body text, publish date and account nickname.
    """

    name = 'wxspider'
    allowed_domains = ['weixin.sogou.com', 'mp.weixin.qq.com']
    # Raw "key=value; key=value" cookie string copied from a logged-in
    # browser session; Sogou requires it to serve paginated search results.
    cookie = 'SUID=8C16707B2013940A000000005A1FF974; SUV=1512044912803761; ABTEST=0|1512044918|v1; weixinIndexVisited=1; SUIR=E07B1C166C68325F08578BF16D2C5767; SNUID=F3960F04807A20CB71C7B79080B24996; pgv_pvi=6746517504; IPLOC=CN1100; pgv_si=s6765034496; ppinf=5|1512207979|1513417579|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZToxODolRTUlODglOTglRTUlQkQlQUN8Y3J0OjEwOjE1MTIyMDc5Nzl8cmVmbmljazoxODolRTUlODglOTglRTUlQkQlQUN8dXNlcmlkOjQ0Om85dDJsdUd4QTF0UUZZV2JoUWhoeU5jbGhtTUlAd2VpeGluLnNvaHUuY29tfA; pprdig=F5q8UErFh4tac8wgxSQGUTXUlVotp58Xe3iyrt2V8CJiRFRwZMUos4KuLbFrwHS4ZK1_HW-WOwJEWt7onoYBkTcsGnBWdWevvGKoAd_1kUEn8RYDybsR87G_wPZywjageWh0ZKVp7rK_By6QLH8gPzZ0JrB5Ae1TByHcqUCaaj8; sgid=29-32211927-AVoiadmvBa9F7qZjntywRxt8; ppmdig=15122079790000007d0a40ca51039f4bef9185fcc93fd899; sct=12; JSESSIONID=aaa3zBYtRyJn_dGzHPv8v'
    kw = '日本'  # search keyword
    start_urls = ['http://weixin.sogou.com/weixin?type=2&query={}'.format(kw)]

    def _cookie_dict(self):
        """Parse the raw cookie string into a dict for scrapy.Request.

        Uses ``split('=', 1)`` so cookie values that themselves contain
        '=' (e.g. base64 padding) are preserved intact.
        """
        return {
            pair.split('=', 1)[0]: pair.split('=', 1)[-1]
            for pair in self.cookie.split('; ')
        }

    def start_requests(self):
        # Send the session cookies on the very first request too — the
        # original code only attached them on pagination requests, so the
        # first results page was fetched without a session.
        for url in self.start_urls:
            yield scrapy.Request(
                url,
                cookies=self._cookie_dict(),
                callback=self.parse,
            )

    def parse(self, response):
        """Handle one Sogou search-results page.

        Yields a Request per article link (with the partially-filled item
        carried in ``meta``), then follows the "next page" link if present.
        """
        # Article links on the Sogou results page.
        for url in response.xpath('//h3/a/@href').extract():
            yield scrapy.Request(
                url,
                callback=self.detail_parse,
                meta={'item': {'url': url}},
            )

        # The "np" anchor is the next-page link; absent on the last page.
        next_page = response.xpath('//a[@class="np"]/@href').extract_first()
        if next_page is not None:
            yield scrapy.Request(
                'http://weixin.sogou.com/weixin' + next_page,
                cookies=self._cookie_dict(),
                callback=self.parse,
            )

    def detail_parse(self, response):
        """Extract article fields from a mp.weixin.qq.com article page."""
        item = response.meta['item']
        doc = pq(response.text)
        item['title'] = response.xpath('//title/text()').extract_first()
        item['content'] = doc('.rich_media_content').text()
        # NOTE(review): '#post-date' / '#js_profile_qrcode' selectors depend
        # on WeChat's page markup — verify they still match current pages.
        item['date'] = doc('#post-date').text()
        item['nickname'] = doc('#js_profile_qrcode > div > strong').text()
        yield item
