# -*- coding: utf-8 -*-
import sys

import re
from datetime import datetime

import scrapy
from scrapy.crawler import CrawlerProcess

from buddha.items import NewsItem


class FjnetSpider(scrapy.Spider):
    """Crawl news articles from fjnet.com section list pages into NewsItem.

    Two list sections are crawled; ``urls`` maps each section index URL to
    the category id stored on every item found under it (``cate_id``).
    """

    name = "fjnet"
    allowed_domains = ["fjnet.com"]
    # Section index URL -> category id for items scraped from that section.
    urls = {
        "http://www.fjnet.com/jjdt/": 2,
        "http://www.fjnet.com/hwjj/haiwainr/": 3
    }

    start_urls = tuple(urls.keys())

    # Captures the directory portion of a URL (greedy match up to last "/").
    # Kept for backward compatibility; URL joining now uses response.urljoin.
    regx = re.compile("(http://.+/)")

    def __init__(self, page=1, *args, **kwargs):
        """Initialize the spider.

        :param page: maximum number of list pages to crawl per section.
        """
        # Forward extra spider arguments to the base class (the original
        # swallowed them, breaking standard `-a key=value` argument passing).
        super(FjnetSpider, self).__init__(self.name, *args, **kwargs)
        self.page = int(page)

    def parse(self, response):
        """Parse a section index page and schedule all of its list pages."""
        [listnum] = response.css(".listnum").re("var countPage = ([0-9]+)")
        # BUG FIX: .re() returns strings; the original compared
        # int <= str, which raises TypeError on Python 3.  Cap the page
        # count at the user-supplied limit.
        listnum = min(self.page, int(listnum))
        meta = {
            "cate_id": self.urls[response.url]
        }
        # The first list page is the section index itself.
        yield scrapy.Request(response.url, self.parse_page, meta=meta)

        # Subsequent list pages are named default_1.htm .. default_<n-1>.htm.
        for num in range(1, listnum):
            yield scrapy.Request(response.url + "default_%d.htm" % num,
                                 self.parse_page, meta=meta)

    def parse_page(self, response):
        """Parse one list page, yielding a request per article link on it."""
        for li in response.css(".t_content_d>ul>li"):
            a = li.css("a")
            href = a.css("::attr(href)").extract_first()
            if not href:
                # Skip list entries without a link (the original crashed
                # with AttributeError on a missing href).
                continue

            # urljoin handles absolute, relative and root-relative hrefs
            # correctly, replacing the fragile regex/host concatenation.
            url = response.urljoin(href)
            yield scrapy.Request(url, self.parse_subpage, meta={
                "url": url,
                "date": li.css("em::text").extract_first(),
                "title": a.css("::text").extract_first(),
                "cate_id": response.meta["cate_id"]
            })

    def parse_subpage(self, response):
        """Parse an article's first page and schedule its continuation pages."""
        [pagenum] = response.css(".listnum").re("var countPage = ([0-9]+)")
        # Convert once so meta carries ints consistently ("page" already is).
        pagenum = int(pagenum)
        meta = {
            "url": response.url,
            "date": response.meta["date"],
            "page": 1,
            "title": response.meta["title"],
            "pagenum": pagenum,
            "cate_id": response.meta["cate_id"]
        }
        # dont_filter: this URL was already fetched to reach this callback,
        # so the dupe filter would otherwise drop it.
        yield scrapy.Request(response.url, self.parse_detail,
                             meta=meta.copy(), dont_filter=True)
        # Continuation pages are "<base>_1.htm", "<base>_2.htm", ...
        for num in range(1, pagenum):
            url = "%s_%d.htm" % (response.url[:-4], num)
            meta["page"] = num + 1
            yield scrapy.Request(url, self.parse_detail, meta=meta.copy())

    def parse_detail(self, response):
        """Extract one article page into a NewsItem."""
        news = NewsItem()
        news["url"] = response.meta["url"]
        news["page"] = response.meta["page"]
        news["pagenum"] = response.meta["pagenum"]
        news["ctime"] = datetime.strptime(response.meta["date"], "%Y-%m-%d")
        news["cate_id"] = response.meta["cate_id"]

        news["title"] = response.meta["title"]
        # Body paragraphs: everything except the centered (image) paragraphs.
        news["content"] = response.css(
            ".TRS_Editor>p[align!='center']::text").extract()

        # First centered image, resolved against the page URL (the original
        # string concatenation broke on absolute src attributes).
        pic = response.css(
            ".TRS_Editor>p[align='center']>img::attr(src)").extract_first()
        news["pic"] = response.urljoin(pic) if pic else pic
        yield news


if __name__ == "__main__":
    # Page cap comes from the command line; default to 1 when omitted
    # (the original raised IndexError when run without arguments).
    page = sys.argv[1] if len(sys.argv) > 1 else 1
    cp = CrawlerProcess(settings={
        "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "ITEM_PIPELINES": {
            "buddha.pipelines.BuddhaPipeline": 301,
            "buddha.pipelines.BuddhaImagesPipeline": 300
        },
        "IMAGES_STORE": "/home/wwwroot/fotuo/Uploads/news"
    })
    cp.crawl(FjnetSpider, page=page)
    cp.start()
