# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy
class BilianSpider(scrapy.Spider):
    """Spider for ebnew.com ("必联网") tender/bid search listings.

    Intended flow (the commented-out blocks are the production version):
    ``start_requests`` POSTs the search form once per keyword ->
    ``parse_start`` reads the pager to find the page count and fans out one
    POST per remaining result page -> ``parse_page1`` scrapes each listing
    row and follows its detail URL -> ``parse_page2`` fills in the
    detail-page-only fields and yields the finished record.
    """
    name = 'bilian'
    # 'ebnew.com' matches the site and every subdomain (www., ss., ...).
    # The previous value ['ebnew', 'ss.ebnew.com'] was wrong: 'ebnew' is not
    # a registrable domain, so detail-page requests to www.ebnew.com were
    # dropped by the offsite middleware.
    allowed_domains = ['ebnew.com']
    # start_urls = ['http://ebnew/']
    keyword_s = ['路由器', '变压器']  # search keywords (router, transformer)

    # Template for one scraped record; deepcopy before filling in.
    sql_data = dict(
        projectcode='',        # project number
        web='',                # source website
        keyword='',            # search keyword that matched this record
        detail_url='',         # URL of the tender detail page
        title='',              # title as published on the third-party site
        toptype='',            # information type
        province='',           # province the tender belongs to
        product='',            # product category
        industry='',           # industry the tender belongs to
        tendering_manner='',   # tendering method
        publicity_date='',     # tender publication date
        expiry_date='',        # tender deadline
    )
    # Template for the search-form POST body; deepcopy before filling in.
    form_data = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',
        orgName='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='路由器',      # search keyword; overwritten per request
        currentPage='',    # 1-based result-page number; overwritten per request
    )

    def start_requests(self):
        """Entry point.

        The commented-out loop is the production flow (one search POST per
        keyword); the active request below is a single debug probe.
        """
        # for keyword in self.keyword_s:
        #     # Deep-copy the blank form template, then fill the per-keyword fields.
        #     form_data = deepcopy(self.form_data)
        #     form_data['key'] = keyword
        #     form_data['currentPage'] = '1'
        #     request = scrapy.FormRequest(
        #         url='http://ss.ebnew.com/tradingSearch/index.htm',
        #         formdata=form_data,
        #         callback=self.parse_start
        #     )
        #     # Stash the form on the request so parse_start can re-POST it
        #     # for pages 2..N.
        #     request.meta['form_data'] = form_data
        #     yield request

        # NOTE(review): this is a detail-page URL, but parse_page1 expects a
        # search-result page; parse_page2 was probably the intended callback.
        # Confirm before enabling the production flow above.
        yield scrapy.Request(
            url='http://www.ebnew.com/businessShow/631160959.html',
            callback=self.parse_page1
        )

        # formdata = self.form_data
        # formdata['key'] = '路由器'
        # formdata['currentPage'] = '2'
        # yield scrapy.FormRequest(
        #     url="http://ss.ebnew.com/tradingSearch/index.htm",
        #     formdata=formdata,
        #     callback=self.parse_page2
        # )

    def parse_start(self, response):
        """Parse page 1 of a keyword search.

        Scrapes the first page directly, then schedules one POST per
        remaining result page (the pager's numeric links contain the last
        page number).
        """
        a_text_s = response.xpath('//form[@id="pagerSubmitForm"]/a/text()').extract()
        # default=1 keeps max() from raising when the pager has no numeric
        # links (single-page result).  The debug override `page_max = 2`
        # that used to follow has been removed.
        page_max = max(
            [int(a_text) for a_text in a_text_s if re.match(r'\d+', a_text)],
            default=1,
        )
        # Page 1 is already in hand; scrape it directly.
        # NOTE(review): once parse_page1 is a generator (production version),
        # this call must be iterated (e.g. `yield from`), not just invoked.
        self.parse_page1(response)
        for page in range(2, page_max + 1):
            # Re-use the form that produced this search (attached by
            # start_requests) with only the page number changed.
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url='http://ss.ebnew.com/tradingSearch/index.htm',
                formdata=form_data,
                callback=self.parse_page1
            )
            # Forward the form so deeper callbacks can reuse it.
            request.meta['form_data'] = form_data
            yield request

    def parse_page1(self, response):
        """Scrape one search-result page: one record per listing row.

        The commented-out block is the production version (fills sql_data
        and follows each detail URL); the active code only prints the
        scraped fields for debugging.
        """
        # form_data = response.meta['form_data']
        # keyword = form_data['key']  # was form_data.get['key'] -- a TypeError
        # content_list_x_s = response.xpath('//div[@class="ebnew-content-list"]/div')
        # for content_list_x in content_list_x_s:
        #     sql_data = deepcopy(self.sql_data)
        #     sql_data['toptype'] = content_list_x.xpath('./div[1]/i[1]/text()').extract_first()
        #     sql_data['title'] = content_list_x.xpath('./div[1]/a/text()').extract_first()
        #     sql_data['publicity_date'] = content_list_x.xpath('./div[1]/i[2]/text()').extract_first()
        #     if sql_data['publicity_date']:
        #         # Keep only digits and '-', stripping any label text around the date.
        #         sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])
        #
        #     sql_data['tendering_manner'] = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
        #     sql_data['product'] = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
        #     sql_data['expiry_date'] = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
        #     sql_data['province'] = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
        #     sql_data['detail_url'] = content_list_x.xpath('./div[1]/a/@href').extract_first()
        #     sql_data['keyword'] = keyword
        #     sql_data['web'] = '必联网'
        #     request = scrapy.FormRequest(
        #         url=sql_data['detail_url'],
        #         callback=self.parse_page2
        #     )
        #     request.meta['sql_data'] = sql_data
        #     yield request

        # Debug version: print the scraped fields instead of yielding items.
        content_list_x_s = response.xpath('//div[@class="ebnew-content-list"]/div')
        for content_list_x in content_list_x_s:
            toptype = content_list_x.xpath('./div[1]/i[1]/text()').extract_first()
            title = content_list_x.xpath('./div[1]/a/text()').extract_first()
            publicity_date = content_list_x.xpath('./div[1]/i[2]/text()').extract_first()
            if publicity_date:
                # Keep only digits and '-', stripping any label text around the date.
                publicity_date = re.sub(r'[^0-9\-]', '', publicity_date)

            tendering_manner = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            product = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            expiry_date = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            province = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()

            print(toptype, title, publicity_date, tendering_manner, product, expiry_date, province)

    def parse_page2(self, response):
        """Scrape a tender detail page: project code and industry.

        The commented-out block is the production version that completes the
        sql_data record passed via request.meta and yields it.
        """
        # sql_data = response.meta['sql_data']
        # # li[1] holds the project code, li[8] the industry (the draft read
        # # li[1] for both and was missing .extract_first()).
        # sql_data['projectcode'] = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li[1]/span[2]/text()').extract_first()
        # sql_data['industry'] = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li[8]/span[2]/text()').extract_first()
        #
        # if not sql_data['projectcode']:
        #     # Fallback: pull the project number ("项目编号") out of the raw HTML.
        #     projectcode_find = re.findall(r'项目编号[:：]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})', response.body.decode('utf-8'))
        #     sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ""
        # yield sql_data

        # Debug version: print instead of yielding.
        li_x_s = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li')
        # Guard the positional lookups: a layout change (or a non-detail
        # page) would otherwise raise IndexError.
        projectcode = li_x_s[0].xpath('./span[2]/text()').extract_first() if len(li_x_s) > 0 else None
        industry = li_x_s[7].xpath('./span[2]/text()').extract_first() if len(li_x_s) > 7 else None
        if not projectcode:
            # Fallback: pull the project number ("项目编号") out of the raw HTML.
            projectcode_find = re.findall(r'项目编号[:：]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})', response.body.decode('utf-8'))
            projectcode = projectcode_find[0] if projectcode_find else ""
        print(projectcode, industry)

def parse_page(self, response):
    """Debug helper: dump the raw response body to ``2.html``.

    NOTE(review): defined at module level yet takes ``self`` -- it looks
    like a mis-indented method of BilianSpider.  Confirm the intended
    placement before wiring it up as a callback.
    """
    with open('2.html', 'wb') as dump_file:
        dump_file.write(response.body)
