import scrapy
import  re
from  copy import  deepcopy


class BilianSpider(scrapy.Spider):
    """Crawl bid announcements (招标公告) from ss.ebnew.com.

    Flow:
        start_requests  -> POST the search form once per keyword (page 1)
        start_parse1    -> re-POST the same form for pages 1..2
        start_page1     -> extract detail-page links from a result page
        start_page2     -> scrape one detail page into an item dict
    """

    name = 'bilian'
    # Must be the registrable domain, otherwise Scrapy's OffsiteMiddleware
    # drops every request to ss.ebnew.com (the original value 'ebnew' did).
    allowed_domains = ['ebnew.com']
    # start_urls = ['http://ebnew/']

    # Endpoint the search form is POSTed to.
    search_url = 'https://ss.ebnew.com/tradingSearch/index.htm'

    # Item template (5 fields + source URL); deep-copied for every detail page.
    caiji_data = dict(
        bianhao='',  # project number
        laiyuan='',  # source-site name
        title='',    # announcement title
        url='',      # detail-page URL
        shijian='',  # announcement timestamp
        key='',      # search keyword that produced this hit
    )

    # POST-body template for the search endpoint; deep-copied per request.
    # All values must be strings (scrapy.FormRequest requirement).
    form_data = dict(
        infoClassCodes='zbgg',  # info class: bid announcements
        rangeTyp='',
        projectType='bid',      # constant on this site
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',                 # filled in with the search keyword
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='timeDesc',  # constant on this site
        orgName='',
        currentPage='',         # page number, as a string
    )

    # Keywords to search for (router, transformer).
    keyword_s = ['路由器', '变压器']

    def start_requests(self):
        """POST the search form once per keyword, starting at page 1."""
        for keyword in self.keyword_s:
            form_data = deepcopy(self.form_data)
            form_data['key'] = keyword
            # Page number must be a string or FormRequest rejects the value.
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url=self.search_url,
                formdata=form_data,
                callback=self.start_parse1,
                dont_filter=True,
            )
            # Carry the form along so later callbacks can re-use/re-page it.
            request.meta['form_data'] = form_data
            yield request

    def start_parse1(self, response):
        """Fan out over result pages 1..2 for the keyword of this response."""
        for page in range(1, 3):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url=self.search_url,
                formdata=form_data,
                dont_filter=True,
                callback=self.start_page1,
            )
            request.meta['form_data'] = form_data
            yield request

    def start_page1(self, response):
        """Extract every detail-page link from one search-result page."""
        form_data = deepcopy(response.meta['form_data'])
        url_s = response.xpath(
            "//div[contains(@class,'abstract-head')]"
            "/a[contains(@class,'line-h26')]/@href"
        )
        for url in url_s:
            caiji_data = deepcopy(self.caiji_data)
            detail_url = url.get()
            caiji_data['url'] = detail_url
            caiji_data['key'] = form_data['key']
            request = scrapy.Request(
                url=detail_url,
                dont_filter=True,
                callback=self.start_page2,
            )
            request.meta['caiji_data'] = caiji_data
            yield request

    def start_page2(self, response):
        """Scrape one announcement detail page into caiji_data and yield it
        for the item pipeline.

        :param response: detail-page response carrying meta['caiji_data']
        """
        # response.text replaces the deprecated response.body_as_unicode();
        # strip newlines so the regexes below can match across line breaks.
        content = re.sub('\n', '', response.text)
        caiji_data = deepcopy(response.meta['caiji_data'])

        # '//ul' (match anywhere) — the original '/ul' was an absolute path
        # from the document root and could never match an HTML document.
        bianhao = response.xpath(
            '//ul[contains(@class,"ebnew-project-information")]'
            '/li[1]/span[2]/text()'
        ).extract_first()
        caiji_data['bianhao'] = bianhao or ''  # project number

        caiji_data['laiyuan'] = '比联网'  # source-site name

        # Guard both regexes: findall(...)[0] raised IndexError on pages
        # where the pattern is absent.
        titles = re.findall('<title>(.*?)</title>', content)
        caiji_data['title'] = titles[0] if titles else ''
        times = re.findall('<span class="fl">(.*?)</span>', content)
        caiji_data['shijian'] = times[0] if times else ''

        yield caiji_data
