# -*- coding: UTF-8 -*-
import scrapy
from scrapy import log
from scrapy_test.ScrapyTestItem import ScrapyTestItem
import urllib
from scrapy_splash import SplashRequest
class SouGouWechatSpider(scrapy.Spider):
	"""Search Sogou's WeChat vertical for an official account and walk its
	article history.

	Flow: Sogou search page -> account profile (rendered via Splash) ->
	per-article entries in the ``div#history`` list.

	Run with: ``scrapy crawl dmoz -a search_name=<account name>``
	"""
	name = "dmoz"

	def __init__(self, search_name=None, *args, **kwargs):
		"""Build the Sogou search URL for *search_name*.

		:param search_name: official-account name to search for. When omitted
			no start URL is built and the spider crawls nothing (previously
			``urllib.quote(None)`` raised ``TypeError``).
		"""
		super(SouGouWechatSpider, self).__init__(*args, **kwargs)
		if search_name:
			# urllib.quote moved to urllib.parse.quote in Python 3;
			# import locally so either interpreter works.
			try:
				from urllib import quote  # Python 2
			except ImportError:
				from urllib.parse import quote  # Python 3
			self.custom_url = (
				"https://weixin.sogou.com/weixin?type=1&s_from=input&ie=utf8"
				"&_sug_=n&_sug_type_=&query=%s" % quote(search_name)
			)
		else:
			self.custom_url = None
		# Article links on the history page are relative to this host.
		self.search_article_boot_url = "https://mp.weixin.qq.com"

	def start_requests(self):
		"""Kick off the crawl with the Sogou search page, if one was built."""
		if self.custom_url:
			yield scrapy.Request(url=self.custom_url, callback=self.search_public, meta={})

	def search_public(self, response):
		"""Parse the Sogou result list and follow each official-account link.

		Each hit sits in a ``div.txt-box``; the account page is rendered with
		JavaScript, so it is fetched through Splash.
		"""
		for link in response.xpath('//div[@class="txt-box"]//a'):
			self.logger.info("sel: %s", link.extract())
			href = link.xpath('@href').extract_first()
			if not href:
				continue  # anchor without an href — nothing to follow
			# Sogou HTML-escapes the query string; undo it before joining.
			public_url = response.urljoin(href.replace("&amp;", "&"))
			self.logger.info("public_url: %s", public_url)
			yield SplashRequest(url=public_url, callback=self.search_article, args={'wait': 1})

	def search_article(self, response):
		"""Enumerate the account's article history (``div#history``) and log
		each article URL and publication date."""
		self.logger.info("------------------ %s",
			response.xpath('//div[@class="weui_category_title"]').extract())
		for entry in response.xpath('//div[@id="history"]//h4'):
			self.logger.info("search_article sel: %s", entry.extract())
			# NOTE(review): the history markup stores the article path in a
			# non-standard "hrefs" attribute on the h4 — confirm against the
			# live page before changing.
			href = entry.xpath("@hrefs").extract_first()
			if not href:
				continue  # entry without an article path
			search_article_url = response.urljoin(self.search_article_boot_url + href)
			# Was "/parent/p[...]": an absolute path to an element literally
			# named <parent>, which never matches. The parent axis is "..".
			date_time = entry.xpath("../p[@class='weui_media_extra_info']/text()").extract_first()
			self.logger.info("search_article_url: %s", search_article_url)
			self.logger.info("date_time: %s", date_time)
			# TODO: enable once parse() actually extracts article fields.
			# yield scrapy.Request(url=search_article_url, callback=self.parse, meta={"date_time": date_time})

	def parse(self, response):
		"""Placeholder article parser — no fields are extracted yet."""
		# TODO: populate a ScrapyTestItem (title/link/date) from the article page.
		item = scrapy.Item()