# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy
class BidlinkSpider(scrapy.Spider):
    """Search ss.ebnew.com for each keyword and scrape the bid listings.

    Flow: start_requests (page 1 per keyword) -> parse_start (discover the
    page count, request remaining pages) -> parse_page1 (one search-result
    page, request each listing's detail page) -> parse_page2 (detail page,
    yield the completed record dict).
    """
    name = 'bidlink'
    # 'ebnew.com' covers both www.ebnew.com (detail pages) and ss.ebnew.com
    # (search pages).  The original 'ebnew' entry was not a valid domain, so
    # the offsite middleware would have dropped the detail-page requests.
    allowed_domains = ['ebnew.com', 'ss.ebnew.com']
    keywords = [
        '路由器', '变压器'  # "router", "transformer"
    ]
    # Template for one scraped record; deep-copied per listing so the class
    # attribute is never mutated.
    sql_data = dict(
        projectcode='',        # project number
        web='',                # source-website tag
        keyword='',            # search keyword that found this record
        detail_url='',         # link to the detail page
        title='',              # title as published on the third-party site
        toptype='',            # information type
        province='',
        product='',            # product category being tendered
        industry='',           # industry classification
        tendering_manner='',   # tendering method
        publicity_date='',     # tender publication date
        expiry_date='',        # tender closing date
    )
    # Template for the search POST form; deep-copied per request.
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',                # search keyword
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',
        orgName='',
        currentPage=''
    )

    # Patterns compiled once (raw strings), instead of re-parsing in every
    # callback invocation.
    _RE_PAGE_NUM = re.compile(r'\d+')
    _RE_DATE_CLEAN = re.compile(r'[^0-9\-]')
    _RE_PROJECTCODE = re.compile(r'项目编号[:：]?\s{0,2}([a-zA-Z0-9\-_]{10,80})')

    def start_requests(self):
        """Issue the first search request (page 1) for every keyword."""
        for keyword in self.keywords:
            form_data = deepcopy(self.form_data)  # never mutate the template
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url="https://ss.ebnew.com/tradingSearch/index.htm",
                formdata=form_data,
                callback=self.parse_start
            )
            # Carry the form along so later callbacks can re-submit it with a
            # different page number.
            request.meta['form_data'] = form_data
            yield request

    def parse_start(self, response):
        """Read the pager on page 1, then request every remaining page.

        Page 1 itself is also handed to parse_page1 — previously its
        listings were silently skipped.
        """
        # Extract the listings of page 1 before paging onward.
        for detail_request in self.parse_page1(response):
            yield detail_request
        a_texts = response.xpath('//form[@id="pagerSubmitForm"]/a/text()').extract()
        page_numbers = [int(t) for t in a_texts if self._RE_PAGE_NUM.match(t)]
        if not page_numbers:
            # No pager links: a single page of results, nothing more to fetch.
            # (The original max() would have raised on an empty sequence.)
            return
        # BUGFIX: removed the leftover debug override `page_max = 3`, and use
        # page_max + 1 because range() excludes its end (off-by-one dropped
        # the last page).
        page_max = max(page_numbers)
        for page in range(2, page_max + 1):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url='https://ss.ebnew.com/tradingSearch/index.htm',
                formdata=form_data,
                callback=self.parse_page1
            )
            request.meta['form_data'] = form_data
            yield request

    def parse_page1(self, response):
        """Parse one search-result page and request each listing's detail page."""
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        content_list_xs = response.xpath('//div[@class="ebnew-content-list"]/div')
        for content_list_x in content_list_xs:
            sql_data = deepcopy(self.sql_data)
            sql_data['toptype'] = content_list_x.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = content_list_x.xpath('./div[1]/a/text()').extract_first()
            sql_data['publicity_date'] = content_list_x.xpath('./div[1]/i[2]/text()').extract_first()
            if sql_data['publicity_date']:
                # Keep only digits and '-' (strips labels surrounding the date).
                sql_data['publicity_date'] = self._RE_DATE_CLEAN.sub('', sql_data['publicity_date'])
            sql_data['tendering_manner'] = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            sql_data['expiry_date'] = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            # BUGFIX: key was misspelled 'detaiil_url', which left the real
            # 'detail_url' field permanently empty.
            sql_data['detail_url'] = content_list_x.xpath('./div[1]/a/@href').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = 'bidlink'  # source web
            if not sql_data['detail_url']:
                continue  # listing without a link: nothing further to fetch
            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2
            )
            request.meta['sql_data'] = sql_data
            # BUGFIX: the request was previously built but never yielded, so
            # no detail page was ever fetched.
            yield request

    def parse_page2(self, response):
        """Parse a listing's detail page and yield the completed record."""
        sql_data = response.meta['sql_data']
        sql_data['projectcode'] = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li[1]/span[2]/text()'
        ).extract_first()
        sql_data['industry'] = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li[8]/span[2]/text()'
        ).extract_first()
        if not sql_data['projectcode']:
            # The info list had no project number: fall back to scanning the
            # whole page body for a "项目编号: XXXX" pattern.
            projectcode_find = self._RE_PROJECTCODE.findall(response.body.decode('utf-8'))
            # BUGFIX: the fallback value was computed but never stored back
            # into sql_data.
            sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ''
        # BUGFIX: yield the finished record so item pipelines receive it
        # (previously it was only printed).
        yield sql_data



