# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy  # deep copy for per-request mutable templates


class Bilian1Spider(scrapy.Spider):
    """Crawl ss.ebnew.com tender-search results for each keyword.

    For every keyword a paginated POST search is issued; each result row's
    detail page is then fetched, and one fully populated ``sql_data`` dict
    is yielded per announcement.
    """
    name = 'bilian1'
    # Bare domain only — no scheme, no path, no trailing slash.  Search runs
    # on ss.ebnew.com but detail pages live on www.ebnew.com, so allow the
    # whole ebnew.com domain (the previous ['ebnew', 'ss.ebnew.com/'] made
    # the offsite middleware drop every detail-page request).
    allowed_domains = ['ebnew.com']

    # Search keywords; one crawl sequence is started per keyword.
    key_words = [
        '路由器', '黄金', '膜结构', '变压器'
    ]

    # Record template; deep-copied for every scraped announcement so items
    # never share state.
    sql_data = dict(
        projectcode='',        # project number
        web='',                # source website
        keyword='',            # keyword that matched this record
        detail_url='',         # URL of the tender detail page
        title='',              # title as published on the third-party site
        toptype='',            # information type (announcement / result)
        province='',           # province
        product='',            # product category
        industry='',           # industry
        tendering_manner='',   # tendering method
        publicity_date='',     # publication date
        expiry_date='',        # submission deadline
    )

    # POST form template for the search endpoint; deep-copied per request.
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',                 # search keyword
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',  # sort results newest-first
        orgName='',
        currentPage='',         # page number being requested
    )

    # Search endpoint, factored out so it is defined in exactly one place.
    search_url = 'http://ss.ebnew.com/tradingSearch/index.htm'

    def start_requests(self):
        """Issue the page-1 search request for every keyword."""
        for keyword in self.key_words:
            form_data = deepcopy(self.form_data)
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            yield scrapy.FormRequest(
                url=self.search_url,
                formdata=form_data,
                callback=self.parse_start,
                meta={'form_data': form_data},
            )

    def parse_start(self, response):
        """Parse the first result page.

        Extracts the total page count from the pager, processes page 1
        in place, then schedules requests for pages 2..max.
        """
        pager_texts = response.xpath(
            '//form[@id="pagerSubmitForm"]/a/text()').extract()
        page_numbers = [int(t) for t in pager_texts if re.match(r'\d+', t)]
        # A single-page result set may have no numbered pager links at all;
        # guard against max() on an empty sequence.
        max_page = max(page_numbers) if page_numbers else 1
        # parse_page1 is a generator: it must be iterated.  (The previous
        # version called it without consuming it, silently dropping every
        # page-1 item, and also overrode max_page with a debug constant.)
        yield from self.parse_page1(response)
        for page in range(2, max_page + 1):  # page 1 was handled above
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            yield scrapy.FormRequest(
                url=self.search_url,
                formdata=form_data,
                callback=self.parse_page1,
                meta={'form_data': form_data},
            )

    def parse_page1(self, response):
        """Extract the summary fields of every announcement on one result
        page and request its detail page (completed by ``parse_page2``)."""
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        for row in response.xpath('//div[@class="ebnew-content-list"]/div'):
            sql_data = deepcopy(self.sql_data)
            sql_data['toptype'] = row.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = row.xpath('./div[1]/a[1]/text()').extract_first()
            sql_data['publicity_date'] = row.xpath('./div[1]/i[2]/text()').extract_first()
            sql_data['tendering_manner'] = row.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = row.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            sql_data['province'] = row.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            sql_data['detail_url'] = row.xpath('./div[1]/a[1]/@href').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = '比联网'
            # scrapy.Request raises on url=None; skip rows without a link.
            if not sql_data['detail_url']:
                continue
            yield scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2,
                meta={'sql_data': sql_data},
            )

    def parse_page2(self, response):
        """Fill in project code and industry from the detail page and yield
        the completed record."""
        sql_data = response.meta['sql_data']
        # '//ul' searches anywhere in the document; the previous '/ul' was an
        # absolute path that only matches a document-root <ul>, i.e. never.
        sql_data['projectcode'] = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li[1]/span[2]/text()').extract_first()
        sql_data['industry'] = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li[8]/span[2]/text()').extract_first()
        if not sql_data['projectcode']:
            # Fallback: scan the raw page text for a labelled project number.
            found = re.findall(
                r'项目编号[:：]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})',
                response.body.decode('utf-8'))
            sql_data['projectcode'] = found[0] if found else ""
        yield sql_data