# -*- coding: utf-8 -*-
import os
import re

import scrapy


class LolSpider(scrapy.Spider):
    """Spider for League of Legends news on lol.qq.com.

    Walks the paginated news list, follows each article link, and saves
    every article's cleaned text to ``./txt/<title>___<date>.txt``.
    """

    name = 'lol'
    allowed_domains = ['lol.qq.com']
    start_urls = ['http://lol.qq.com/webplat/info/news_version3/152/22006/m14286/list_1.shtml']
    domain = 'http://lol.qq.com'
    # Whitespace noise (CRLF, ideographic space, newline, tab) to strip
    # from article text before saving.
    content_pattern = re.compile(r'\r\n|\u3000|\n|\t')
    # Characters that are illegal or unsafe in file names on common OSes.
    _unsafe_filename_chars = re.compile(r'[\\/:*?"<>|]')

    def parse(self, response):
        """Parse one news-list page: request every article, then follow pagination."""
        for li in response.xpath('//ul[@id="newsListBox"]/li'):
            href = li.xpath('./p/a[@class="beta_tit"]/@href').extract_first()
            title = li.xpath('./p/a[@class="beta_tit"]/text()').extract_first()
            update_time = li.xpath('./p/span[@class="beta_date"]/text()').extract_first()
            # extract_first() returns None on malformed items; skip instead of
            # crashing on None concatenation / None slicing below.
            if not href or not title:
                continue
            # Example article URL:
            # http://lol.qq.com/webplat/info/news_version3/152/4579/7138/m5582/201509/376200.shtml
            yield scrapy.Request(
                # urljoin handles both absolute and relative hrefs; for the
                # site's leading-slash hrefs it matches the old domain + href.
                response.urljoin(href),
                callback=self.parse_page,
                # [5:] presumably drops a fixed-length label prefix from the
                # date text (e.g. a "时间:" style prefix) — TODO confirm.
                meta={'title': title, 'update_time': (update_time or '')[5:]}
            )

        # Follow the "next page" link if one exists.
        next_page_url = response.xpath('//a[@class="pagenext"]/@href').extract_first()
        if next_page_url is not None:
            yield scrapy.Request(
                response.urljoin(next_page_url),
                callback=self.parse
            )

    def parse_page(self, response):
        """Parse an article page and write its cleaned text under ./txt/."""
        # Sanitize the title so characters like '/' cannot break the path.
        safe_title = self._unsafe_filename_chars.sub('_', response.meta['title'])
        title_txt_path = './txt/' + safe_title + '___' + response.meta['update_time'] + '.txt'
        content = ' '.join(response.xpath('//div[@id="article"]//text()').extract())
        # Apply the cleanup pattern (previously compiled but never used) to
        # strip whitespace noise from the article body.
        content = self.content_pattern.sub('', content)
        os.makedirs('./txt', exist_ok=True)  # ensure the output directory exists
        with open(title_txt_path, 'w', encoding='utf-8') as f:
            f.write(content)
