# -*- coding: utf-8 -*-
"""Scrapy spider that searches bid listings on ss.ebnew.com (必联网).

Restored from fully commented-out code. Concrete fixes over the original:

* ``parse_start`` called ``self.parse_page1(response)`` and discarded the
  resulting generator, so page-1 detail requests were never yielded — now
  delegated with ``yield from``.
* A leftover debug override ``page_max = 3`` clobbered the real page count
  and the pagination loop was commented out — both restored.
* ``max()`` on an empty pager no longer raises (defaults to 1 page).
* Regex patterns are raw strings (avoids invalid-escape warnings).
* Rows without a detail URL are skipped instead of crashing
  ``scrapy.Request`` with ``url=None``.
"""
import re
from copy import deepcopy

import scrapy


class BilianSpider(scrapy.Spider):
    name = 'bilian'
    # allowed_domains = ['ebnew']
    # start_urls = ['http://ebnew/']

    # Single search endpoint used for every paginated POST.
    search_url = 'http://ss.ebnew.com/tradingSearch/index.htm'

    # Template for one scraped record (copied per row, then filled in).
    sql_data = dict(
        projectcode='',        # project number
        web='',                # source website
        keyword='',            # search keyword that found this record
        detail_url='',         # URL of the bid detail page
        title='',              # title as published on the site
        toptype='',            # information type
        province='',           # province the bid belongs to
        product='',            # product category
        industry='',           # industry
        tendering_manner='',   # tendering manner
        publicity_date='',     # bid publicity date
        expiry_date='',        # bid expiry date
    )
    # POST form template for the search endpoint; ``key`` and
    # ``currentPage`` are overwritten per request.
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',
        orgName='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='路由器',
        currentPage='',
    )

    def start_requests(self):
        """Submit page 1 of the search for the default keyword."""
        form_data = deepcopy(self.form_data)
        form_data['key'] = '路由器'
        form_data['currentPage'] = '1'
        request = scrapy.FormRequest(
            url=self.search_url,
            formdata=form_data,
            callback=self.parse_start,
        )
        # Carry the form along so later pages can re-submit it unchanged.
        request.meta['form_data'] = form_data
        yield request

    def parse_start(self, response):
        """Parse the first results page, then request every remaining page.

        The pager links hold the page numbers; the highest numeric link is
        the total page count.
        """
        page_texts = response.xpath(
            '//form[@id="pagerSubmitForm"]/a/text()').extract()
        page_numbers = [int(p) for p in page_texts if re.match(r'\d+', p)]
        # An empty/missing pager means there is only the current page.
        page_max = max(page_numbers) if page_numbers else 1

        # Items and detail requests from the first results page.
        yield from self.parse_page1(response)

        # Remaining pages: same form, bumped page number.
        for page in range(2, page_max + 1):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url=self.search_url,
                formdata=form_data,
                callback=self.parse_page1,
            )
            request.meta['form_data'] = form_data
            yield request

    def parse_page1(self, response):
        """Extract one summary record per result row and fetch its detail page."""
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        content_list_x_s = response.xpath("//div[@class='ebnew-content-list']/div")
        for content_list_x in content_list_x_s:
            sql_data = deepcopy(self.sql_data)

            sql_data['toptype'] = content_list_x.xpath("./div/i[1]/text()").extract_first()
            sql_data['title'] = content_list_x.xpath("./div/a/text()").extract_first()
            sql_data['publicity_date'] = content_list_x.xpath("./div/i[2]/text()").extract_first()
            if sql_data['publicity_date']:
                # Strip everything except digits and '-' (leaves YYYY-MM-DD).
                sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])

            sql_data['tendering_manner'] = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            sql_data['expiry_date'] = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            sql_data['detail_url'] = content_list_x.xpath('./div[1]/a/@href').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = '必联网'

            # A missing href would make scrapy.Request raise on url=None.
            if not sql_data['detail_url']:
                continue

            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2,
            )
            request.meta['sql_data'] = sql_data
            yield request

    def parse_page2(self, response):
        """Fill project code and industry from the detail page, then emit the item."""
        sql_data = response.meta['sql_data']
        # NOTE(review): assumes the information list always has >= 8 <li>
        # entries with code at index 0 and industry at index 7 — confirm
        # against live pages before hardening.
        li_x_s = response.xpath("//ul[contains(@class,'ebnew-project-information')]/li")
        sql_data['projectcode'] = li_x_s[0].xpath("./span[2]/text()").extract_first()
        sql_data['industry'] = li_x_s[7].xpath("./span[2]/text()").extract_first()
        if not sql_data['projectcode']:
            # Fallback: scrape "项目编号" (project number) out of the raw body.
            projectcode_find = re.findall(
                r'项目编号[:：]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})',
                response.body.decode('utf-8'))
            sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ""
        yield sql_data