# -*- coding: utf-8 -*-
import scrapy
from copy import deepcopy
import re


class ZbspiderSpider(scrapy.Spider):
    """Search ss.ebnew.com for each configured keyword and scrape bid
    announcements: listing fields from the search-result pages, plus the
    project code and industry from each announcement's detail page.
    """

    name = 'zbspider'
    allowed_domains = ['ebnew.com', 'ss.ebnew.com']
    # start_urls = ['http://ebnew.com/']

    # Keywords to search for ("router", "transformer").
    keyword_s = ["路由器", "变压器"]

    # The search endpoint POSTed for every result page.
    SEARCH_URL = 'http://ss.ebnew.com/tradingSearch/index.htm'

    # Template for one scraped record. Deep-copied per item (see parse())
    # so this class-level dict is never mutated.
    # NOTE(review): 'web' and 'keyword' are never filled anywhere in this
    # file — presumably populated downstream (pipeline); confirm.
    data = dict(
            projectcode='',
            web='',
            keyword='',
            detail_url='',
            title='',
            toptype='',
            province='',
            product='',
            industry='',
            tendering_manner='',
            publicity_date='',
            expiry_date=''

    )

    # Template for the search form. Deep-copied per request; only 'key'
    # (the keyword) and 'currentPage' are filled in before POSTing.
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',
        orgName='',
        currentPage=''
    )

    def start_requests(self):
        """Issue the page-1 search request for every configured keyword."""
        for keyword in self.keyword_s:
            form_data = deepcopy(self.form_data)
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse_star
            )
            # Carry the filled form along so parse_star can re-POST it
            # for the remaining result pages.
            request.meta["form_data"] = form_data
            yield request

    def parse_star(self, response):
        """Handle page 1 of a search: yield its items, then request pages 2..N."""
        page_text_s = response.xpath('//form[@id="pagerSubmitForm"]/a/text()').extract()
        numeric_pages = [int(t) for t in page_text_s if re.match(r'\d+', t)]
        # Fall back to a single page when the pager has no numeric links
        # (the original raised ValueError on max() of an empty sequence).
        page_max = max(numeric_pages) if numeric_pages else 1
        self.logger.debug('pager links %s -> max page %d', page_text_s, page_max)
        # BUG FIX: the original called self.parse(response) without consuming
        # the returned generator, silently dropping every page-1 item.
        yield from self.parse(response)
        for page in range(2, page_max + 1):
            form_data = deepcopy(response.meta["form_data"])
            form_data["currentPage"] = str(page)
            yield scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse
            )

    def parse(self, response):
        """Extract one record per result row and request its detail page."""
        row_s = response.xpath('//div[@class="ebnew-content-list"]/div')
        for row in row_s:
            sql_data = deepcopy(self.data)
            sql_data['toptype'] = row.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = row.xpath('./div[1]/a/text()').extract_first()
            sql_data['detail_url'] = row.xpath('./div[1]/a/@href').extract_first()
            sql_data['publicity_date'] = row.xpath('./div[1]/i[2]/text()').extract_first()
            if sql_data['publicity_date']:
                # Keep only digits and dashes (strips any label text
                # surrounding the date).
                sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])
            sql_data['tendering_manner'] = row.xpath('./div[2]/div/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = row.xpath('./div[2]/div/p[2]/span[2]/text()').extract_first()
            sql_data['expiry_date'] = row.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = row.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            if sql_data['expiry_date']:
                sql_data['expiry_date'] = re.sub(r'[^0-9\-]', '', sql_data['expiry_date'])
            # Skip rows without a detail link: scrapy.Request(url=None)
            # would raise and abort the whole page.
            if not sql_data['detail_url']:
                self.logger.debug('row without detail link skipped: %r', sql_data['title'])
                continue
            # Request the detail page; the partially-filled record rides
            # along in meta.
            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2
            )
            request.meta['sql_data'] = sql_data
            yield request

    def parse_page2(self, response):
        """Fill project code and industry from a detail page, yield the record."""
        sql_data = response.meta['sql_data']
        detail_x_s = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li')
        # Guard the positional indexing: a layout change would otherwise
        # raise IndexError and lose the item entirely.
        projectcode = (detail_x_s[0].xpath('./span[2]/text()').extract_first()
                       if detail_x_s else None)
        if len(detail_x_s) > 7:
            sql_data['industry'] = detail_x_s[7].xpath('./span[2]/text()').extract_first()
        sql_data['projectcode'] = projectcode
        if not projectcode:
            # Fallback: scan the raw HTML for a labelled project-code value.
            # BUG FIX: the original assigned this fallback and then
            # unconditionally overwrote it with the empty xpath result, so
            # the fallback never took effect.
            projectcode_find = re.findall(
                r'(项目编码|项目标号|采购文件编号|招标编号|项目编号|竞价文件编号)[：:]{0,1}\s{0,2}\n*(</span\s*>)*\n*(<span.*?>)*\n*(<u*?>)*\n*([a-zA-Z0-9\-_\[\]]{1,100})',
                response.body.decode('utf-8'))
            if projectcode_find:
                # Group 5 of the pattern is the code value itself.
                sql_data['projectcode'] = projectcode_find[0][4]
        yield sql_data
