# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.http import Request
from urllib import parse
from scrapy.loader import ItemLoader
from bs4 import BeautifulSoup

from ArticleSpider.items import SzstiArticleItem, ArticleItemLoader
class SzstiSpider(scrapy.Spider):
    """Spider for funding-publicity notices on used.szsti.gov.cn.

    Crawls the 2015-2018 notice list pages, follows every open publicity
    notice ("公示") that has not yet closed, and loads one
    ``SzstiArticleItem`` per detail page.
    """

    name = 'szsti'
    allowed_domains = ['used.szsti.gov.cn']
    start_urls = ['http://used.szsti.gov.cn/notices/2018/1']

    # Pagination links we are willing to follow (2015-2018 archives).
    # Compiled once at class level instead of per-link inside the loop.
    _NEXT_PAGE_RE = re.compile(r"(.*notices/201[5678].*$)")

    def parse(self, response):
        """
        1. Extract article URLs from the list page and schedule them for
           download; the downloaded pages are parsed by ``parse_detail``.
        2. Extract pagination URLs and schedule them back to ``parse``.
        """
        # Every entry on the list page: date span + title link.
        for post_node in response.css(".list > li "):
            create_date = post_node.css("span::text").extract_first("")
            post_url = post_node.css("a::attr(href)").extract_first("")
            title = post_node.css("a::text").extract_first("")
            # Only follow open publicity notices ("公示") that have not
            # already closed ("公示已结束").
            if "公示" in title and "公示已结束" not in title:
                yield Request(
                    url=parse.urljoin(response.url, post_url),
                    dont_filter=True,
                    meta={"create_date": create_date, "title": title},
                    callback=self.parse_detail,
                )

        # Follow pagination links into the 2015-2018 archives.
        for next_node in response.xpath('//*[@id="article"]/table/tr/td/a'):
            next_url = next_node.css('::attr(href)').extract_first("")
            # BUG FIX: the original called match_obj.group(1) without a
            # None check, raising AttributeError on any non-matching href.
            match_obj = self._NEXT_PAGE_RE.match(next_url)
            if match_obj:
                yield Request(url=parse.urljoin(response.url, match_obj.group(1)),
                              callback=self.parse)

    def parse_detail(self, response):
        """Parse one notice detail page into a ``SzstiArticleItem``.

        Reads ``create_date`` and ``title`` from the request meta set by
        ``parse``, maps the notice table's header cells to item fields by
        column position, and yields the loaded item.
        """
        create_date = response.meta.get("create_date", "")
        title = response.meta.get("title", "")

        item_loader = ArticleItemLoader(item=SzstiArticleItem(), response=response)
        item_loader.add_value("create_date", [create_date])
        item_loader.add_value("title", [title])
        item_loader.add_value("url", response.url)
        item_loader.add_css("source_department", "#article > p.publisher::text")

        # Header row of the notice table; each header decides which item
        # field its column feeds.
        headers = response.xpath(
            '//*[@id="article"]/table[1]/tbody/tr[1]/th/text()').extract()

        # BUG FIX: the original used headers.index(header) to find the
        # column, which returns the FIRST occurrence and mis-maps duplicate
        # header texts; enumerate gives each header its true column.
        for column, header in enumerate(headers, start=1):
            column_xpath = ('//*[@id="article"]/table[1]/tbody/tr/td['
                            + str(column) + ']/text()')

            if "项目类别" in header:  # project category -> policy_name
                for policy_name in response.xpath(column_xpath).extract():
                    item_loader.add_value("policy_name", policy_name)

            if "项目名称" in header:  # project name -> project_name
                for project_name in response.xpath(column_xpath).extract():
                    item_loader.add_value("project_name", project_name)

            if "单位" in header:  # applying unit -> corporation
                for corporation in response.xpath(column_xpath).extract():
                    item_loader.add_value("corporation", corporation)

        # BUG FIX: the fallback for a missing "项目类别" column used to run
        # once per NON-matching header, adding the h1[2] xpath and an empty
        # value repeatedly; it now runs at most once, after the scan.
        if not any("项目类别" in header for header in headers):
            item_loader.add_xpath("policy_name", '//*[@id="article"]/h1[2]')
            item_loader.add_value("policy_name", '')

        # BUG FIX: the original ended with print(item_loader) and never
        # produced an item; yield the loaded item to the pipeline instead.
        yield item_loader.load_item()
