# -*- coding: utf-8 -*-
import scrapy
from hssplider.models.ArticleContent import ArticleContent
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from hssplider.mongoUtile import mongo


class HswzscontentSpider(CrawlSpider):
    """Scrape thread content from the Suzhou 12345 government forum.

    ``parse`` reads previously collected thread links from the MongoDB
    "note" collection and schedules one request per link; ``parse_content``
    extracts every floor (post) on a thread page as an ``ArticleContent``
    item and follows the thread's own "next page" pagination.
    """

    name = 'hswzscontent'
    # Prepended to the relative links stored in MongoDB and to pagination hrefs.
    url_prefix = 'http://www.12345.suzhou.gov.cn/bbs/'
    allowed_domains = ['www.12345.suzhou.gov.cn']
    start_urls = ['http://www.12345.suzhou.gov.cn/bbs/forum.php?mod=forumdisplay&fid=2']
    # NOTE(review): this dict is never passed to any Request in this spider,
    # so it currently has no effect — confirm whether it should be wired in
    # via ``headers=self.headers`` (the embedded session cookie would then be
    # sent with every request).
    headers = {
        "Host": "www.12345.suzhou.gov.cn",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept-Encoding": "gzip, deflate",
        "Referer": "http://www.12345.suzhou.gov.cn/zfhf/",
        "Cookie": "Hm_lvt_361979bf0f47e7cf74cc8c25651f2c17=1524287739,1524673250; Hm_lpvt_361979bf0f47e7cf74cc8c25651f2c17=1524673250; NOkq_2132_saltkey=nBbbA6Ao; NOkq_2132_lastvisit=1524284132; NOkq_2132_visitedfid=2D42; NOkq_2132_home_diymode=1; Hm_lvt_a27b04b98f1f78776caef7b52c730227=1524451282,1524503868,1524505256,1524569046; NOkq_2132_seccode=3653.72dd494e712224222b; Hm_lpvt_a27b04b98f1f78776caef7b52c730227=1524665147; NOkq_2132_viewid=tid_1274350; NOkq_2132_st_t=0%7C1524673248%7Cded5c3ae167a3e63059fbfb787ee45d5; NOkq_2132_forum_lastvisit=D_2_1524673248; NOkq_2132_onlineusernum=243; NOkq_2132_sendmail=1; NOkq_2132_lastact=1524673404%09forum.php%09ajax",
        "Connection": "keep-alive",
        # BUG FIX: the key was "User-Agent:" (trailing colon inside the
        # string), which would be sent as an invalid header name and ignored.
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"
    }
    # NOTE(review): Scrapy's CrawlSpider docs warn that rules must not use
    # ``parse`` as a callback; because ``parse`` is overridden below, this
    # Rule never fires. Kept as-is for backward compatibility — confirm
    # whether pagination of the forum listing is still needed here.
    rules = (
        Rule(LinkExtractor(allow=('page=[0-9]+',)), callback='parse'),
    )

    def parse(self, response):
        """Schedule a content request for every stored note link.

        The response body is not inspected; links come from the MongoDB
        "note" collection, and entries with an empty/missing link are
        skipped.
        """
        db = mongo()
        for note in db.findall("note"):
            if note['link']:
                yield scrapy.Request(self.url_prefix + note['link'],
                                     callback=self.parse_content)

    def parse_content(self, response):
        """Yield one ArticleContent item per floor (post) on a thread page.

        Also follows the thread-internal "next page" link, re-entering this
        callback until the last page is reached.
        """
        posts = response.xpath("//div[contains(@id,'post')]")
        title = response.xpath("//span[@id='thread_subject']/text()").extract_first()
        url = response.xpath("//h1[@class='ts']/a[2]/@href").extract_first()
        # Floors are numbered from 1 in page order.
        for floor, post in enumerate(posts, start=1):
            content = ArticleContent()
            content['link'] = url
            content['contenttitle'] = title
            content['author'] = post.css('.authi a[class=xw1]::text').extract_first()
            content['floor'] = floor
            content['content'] = post.css('td[id*=postmessage]::text').extract_first()
            content['cTime'] = post.xpath('.//div[@class="authi"]/em[1]/span/@title').extract_first()
            yield content
        next_page = response.xpath("//a[@class='nxt']/@href").extract_first()
        if next_page:
            yield scrapy.Request(self.url_prefix + next_page,
                                 callback=self.parse_content)
