# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy
class BilianSpider(scrapy.Spider):
    """Spider for the ss.ebnew.com (必联网) tender search.

    For each keyword it POSTs the search form (page 1), schedules every
    remaining result page, follows each listing to its detail page, and
    yields one ``sql_data`` dict per tender.
    """

    name = 'bilian'
    # allowed_domains = ['ebnew', 'www.ebnew.com', 'ss.ebnew.com']

    # Search endpoint shared by all form submissions.
    SEARCH_URL = 'http://ss.ebnew.com/tradingSearch/index.htm'

    # Item template — deep-copied per listing so items never share state.
    sql_data = dict(
        projectcode='',        # project number
        web='',                # source website (e.g. ebnew)
        keyword='',            # search keyword that produced this item
        detail_url='',         # URL of the tender detail page
        title='',              # title as published on the third-party site
        toptype='',            # information type
        province='',           # province
        product='',            # product category
        industry='',           # industry
        tendering_manner='',   # tendering method
        publicity_date='',     # tender publication date
        expiry_date='',        # tender deadline
    )

    # Search-form template — deep-copied per keyword before mutation
    # (mutating the class-level dict in place would leak state between
    # requests; see start_requests).
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',                # search keyword
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',
        orgName='',
        currentPage='',        # page number
    )

    key_s = ['路由器', '交换机']

    def start_requests(self):
        """Issue one search request (page 1) per keyword."""
        for key in self.key_s:
            # BUG FIX: the original mutated one shared dict and stored the
            # same reference in every request's meta, so by response time
            # meta['form_data']['key'] was always the LAST keyword.
            form_data = deepcopy(self.form_data)
            form_data['key'] = key
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse,
            )
            request.meta['form_data'] = form_data
            yield request

    def parse(self, response):
        """Parse result page 1: yield its items, schedule pages 2..max."""
        page_labels = response.xpath("//form/a/text()").extract()
        page_numbers = [int(p) for p in page_labels if re.match(r'\d+', p)]
        # Guard: a single-page result set may render no pager links at all.
        currentPage_max = max(page_numbers) if page_numbers else 1
        # BUG FIX: parse_page1 is a generator; the original called it
        # without consuming it, silently dropping every page-1 item.
        yield from self.parse_page1(response)
        # BUG FIX: range(2, max) skipped the last page; use max + 1.
        for currentPage in range(2, currentPage_max + 1):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(currentPage)
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse_page1,
            )
            # Propagate the form data so parse_page1 knows which keyword
            # produced this page.
            request.meta['form_data'] = form_data
            yield request

    def parse_page1(self, response):
        """Extract one partial item per listing and follow its detail URL."""
        form_data = response.meta.get('form_data', {})
        content_list_x_s = response.xpath("//div[@class='ebnew-content-list']/div")
        for content_list_x in content_list_x_s:
            sql_data = deepcopy(self.sql_data)
            # Information type
            sql_data['toptype'] = content_list_x.xpath("./div[1]/i[1]/text()").extract_first()
            # Published title
            sql_data['title'] = content_list_x.xpath("./div[1]/a/text()").extract_first()
            # Tendering method
            sql_data['tendering_manner'] = content_list_x.xpath("./div[2]/div[1]/p[1]/span[2]/text()").extract_first()
            # Product category
            sql_data['product'] = content_list_x.xpath("./div[2]/div[1]/p[2]/span[2]/text()").extract_first()
            # Deadline
            sql_data['expiry_date'] = content_list_x.xpath("./div[2]/div[2]/p[1]/span[2]/text()").extract_first()
            # Province
            sql_data['province'] = content_list_x.xpath("./div[2]/div[2]/p[2]/span[2]/text()").extract_first()
            sql_data['detail_url'] = content_list_x.xpath("./div[1]/a/@href").extract_first()
            # BUG FIX: record the keyword here, from THIS request's own form
            # data; parse2 used to read self.form_data['key'], which only
            # held the last keyword searched.
            sql_data['keyword'] = form_data.get('key', '')
            if not sql_data['detail_url']:
                # A listing without a link cannot be followed; skip it
                # instead of crashing Request(url=None).
                continue
            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse2,
            )
            request.meta['sql_data'] = sql_data
            yield request

    def parse2(self, response):
        """Fill the remaining fields from the detail page and yield the item."""
        sql_data = deepcopy(response.meta['sql_data'])
        body_text = response.body.decode('utf-8')  # decode once, reuse below
        sql_data['projectcode'] = response.xpath("//div/ul/li[1]/span[2]/text()").extract_first()
        if not sql_data['projectcode']:
            # Fall back to scanning the raw page text.
            # BUG FIX: the original put {10,80} OUTSIDE the capture group,
            # so findall returned only the last character of the code.
            projectcode_find = re.findall(
                r'项目编号[:：]?\s{0,2}([a-zA-Z0-9\-_]{10,80})', body_text)
            sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ''
        # Publication date (not present as a structured field).
        publicity_date_find = re.findall(
            r'发布时间[:：]?\s{0,2}([0-9\s:\-]{1,20})', body_text)
        sql_data['publicity_date'] = publicity_date_find[0] if publicity_date_find else ''
        # Source website
        sql_data['web'] = response.xpath("//div[contains(@class,'details-widget')]//span[1]/text()").extract_first()
        # NOTE: sql_data['keyword'] was already set in parse_page1 from the
        # request's own form data — do not read self.form_data here.
        sql_data['industry'] = response.xpath("//div[@class='position-relative']/ul/li[last()]/span[2]/text()").extract_first()
        yield sql_data