# -*- coding: utf-8 -*-

# 1.	爬取优美语句的词语和词语前五个造句 (8分)
# 2.	爬取优美语句的词语和词语前五个造句的日期 (8分)
# 3.	爬取小学生造句的词语和词语前五个造句语句(8分)
# 4.	爬取小学生造句的词语和词语前五个造句语句的日期 (8分)
# 5.	爬取中学生造句的词语和词语前五个造句语句(8分)
# 6.	爬取中学生造句的词语和词语前五个造句语句的日期(8分)
# 7.	爬取成语造句的词语和词语前五个造句(8分)
# 8.	爬取成语造句的词语和词语前五个造句的日期(8分)
# 9.	爬取关键词的词语和词语前五个造句(8分)
# 10.	爬取关键词的词语和词语前五个造句的日期(8分)
# 11.	爬取短信句子的词语和词语前五个造句(8分)
# 12.	爬取短信句子的词语和词语前五个造句的日期(8分)
# 13.	存入mongo数据库(4分)

import scrapy
from ..items import ZaojvItem


class JvSpider(scrapy.Spider):
    """Crawl word/sentence-example listing pages on zaojv.com.

    ``parse`` walks each category index page, extracts every word link, and
    schedules ``parse_jv`` to collect the first five example sentences for
    that word into a ``ZaojvItem`` (fields: title, href, jv).
    """
    name = 'jv'
    # NOTE(review): the original list contained 'wordz.html' twice. Scrapy
    # issues start URLs with dont_filter=True, so the duplicate page was
    # crawled twice. Deduplicated here; the header lists six categories, so
    # one category URL may still be missing — TODO confirm against the site.
    start_urls = ['http://zaojv.com/wordy.html', 'http://zaojv.com/wordx.html',
                  'http://zaojv.com/wordz.html', 'http://zaojv.com/glc.html',
                  'http://zaojv.com/dx.html']

    def parse(self, response):
        """Extract word title/href pairs from a category index page and
        schedule a detail request for each word."""
        for li in response.xpath("//li[@class='dotline']"):
            title = li.xpath("./a/text()").extract_first()
            relative_href = li.xpath("./a/@href").extract_first()
            if not title or not relative_href:
                # Skip malformed entries instead of raising IndexError and
                # aborting the whole callback.
                continue
            href = 'http://zaojv.com' + relative_href
            yield scrapy.Request(url=href, callback=self.parse_jv,
                                 meta={'title': title, 'href': href})

    def parse_jv(self, response):
        """Collect the first five example sentences on a word detail page
        and yield them as a ZaojvItem."""
        title = response.meta['title']
        href = response.meta['href']
        # 短信 (SMS sentence) pages use a flatter markup than the other
        # categories, so they need a shallower XPath.
        if '短信' in title:
            all_div = response.xpath("//div[@id='content']/div")[1:6]
        else:
            all_div = response.xpath("//div[@id='content']/div/div")[1:6]
        # string(.) flattens each sentence <div> (including nested tags)
        # into one plain-text string.
        jvs = [div.xpath("string(.)").extract_first() for div in all_div]
        item = ZaojvItem()
        item['title'] = title
        item['href'] = href
        item['jv'] = jvs
        # Use the spider logger instead of bare print() so output respects
        # the project's LOG_LEVEL settings.
        self.logger.debug('scraped item: %r', item)
        yield item
