# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy
# deepcopy: used to copy the class-level template dicts (form_data / sql_data)
# so each request/item gets its own mutable copy.

class BilianSpider(scrapy.Spider):
    """Crawl tender (招标) listings from ebnew.com (必联网).

    Flow:
      start_requests -> POST the search form once per keyword (page 1).
      parse_start    -> read the pager for the page count, scrape page 1,
                        then request the remaining pages.
      parse_page2    -> extract one record per result row on a listing page
                        and follow each row's detail link.
      parse_page3    -> complete the record with project code / industry
                        from the detail page and yield it.
    """

    name = 'bilian'
    # allowed_domains = ['ebnew', 'ss.ebnew.com']

    # Search keywords; a full crawl is performed for each one.
    keyword_s = [
        '路由器', '变压器',
    ]

    # Endpoint of the trading-search form (POST target for every listing page).
    SEARCH_URL = 'http://ss.ebnew.com/tradingSearch/index.htm'

    # Template of one output record; deep-copied per scraped row so the
    # class-level dict is never mutated.
    sql_data = dict(
        projectcode='',        # project number
        web='',                # source website
        keyword='',            # search keyword that matched this record
        detail_url='',         # URL of the tender detail page
        title='',              # title as published on the third-party site
        toptype='',            # information type
        province='',           # province
        product='',            # product category
        industry='',           # industry
        tendering_manner='',   # tendering manner
        publicity_date='',     # publication date of the tender
        expiry_date='',        # tender deadline
    )

    # Template of the search form POST body; deep-copied per request.
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',            # search keyword
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',
        orgName='',
        currentPage='',    # page number, sent as a string
    )

    def start_requests(self):
        """Submit the search form for page 1 of every keyword."""
        for keyword in self.keyword_s:
            form_data = deepcopy(self.form_data)
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse_start,
            )
            # Carry the form along so later callbacks can re-submit it
            # with a different page number.
            request.meta['form_data'] = form_data
            yield request

    def parse_start(self, response):
        """Determine how many result pages exist, then scrape every page.

        This response already holds page 1, so its rows are scraped here
        (delegated to parse_page2) before pages 2..page_max are requested.
        """
        a_text_s = response.xpath('//form[@id="pagerSubmitForm"]/a/text()').extract()
        page_nums = [int(text) for text in a_text_s if re.match(r'\d+', text)]
        # Guard: max() on an empty list raises ValueError (e.g. a result set
        # small enough that the pager shows no numbered links).
        page_max = max(page_nums) if page_nums else 1
        # Development cap: never crawl past page 2 (but also never request
        # pages that do not exist).
        page_max = min(page_max, 2)
        # BUG FIX: parse_page2 is a generator, so it must be iterated.
        # The original bare call created and discarded the generator,
        # silently losing every item on page 1.
        yield from self.parse_page2(response)
        # Request the remaining listing pages.
        for page in range(2, page_max + 1):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse_page2,
            )
            request.meta['form_data'] = form_data
            yield request

    def parse_page2(self, response):
        """Extract one record per result row on a listing page and follow
        each row's detail link to parse_page3."""
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        content_list_x_s = response.xpath('//div[@class="ebnew-content-list"]/div')
        for content_list_x in content_list_x_s:
            sql_data = deepcopy(self.sql_data)
            # Information type (e.g. tender / award notice).
            sql_data['toptype'] = content_list_x.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = content_list_x.xpath('./div[1]/a/text()').extract_first()
            sql_data['publicity_date'] = content_list_x.xpath('./div[1]/i[2]/text()').extract_first()
            if sql_data['publicity_date']:
                # Keep only digits and dashes, stripping any label text
                # around the date.
                sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])
            sql_data['tendering_manner'] = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            sql_data['expiry_date'] = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            sql_data['detail_url'] = content_list_x.xpath('./div[1]/a/@href').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = '必联网'
            # Guard: a row without a detail link cannot be followed, and
            # scrapy.Request(url=None) would raise.
            if not sql_data['detail_url']:
                continue
            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page3,
            )
            request.meta['sql_data'] = sql_data
            yield request

    def parse_page3(self, response):
        """Complete a record with data from the tender detail page and
        yield the finished item."""
        sql_data = response.meta['sql_data']
        # BUG FIX: the original XPaths began with '/ul', an absolute path
        # that only matches a document whose ROOT element is <ul> — i.e.
        # never on a real HTML page. '//ul' searches the whole document.
        sql_data['projectcode'] = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li[1]/span[2]/text()'
        ).extract_first()
        sql_data['industry'] = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li[8]/span[2]/text()'
        ).extract_first()
        # Fallback: pull the project number straight out of the page text
        # (response.text decodes with the detected page encoding, unlike a
        # hard-coded utf-8 decode).
        if not sql_data['projectcode']:
            projectcode_find = re.findall(
                r'项目编号[: ：]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})',
                response.text,
            )
            sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ""
        yield sql_data




