from copy import deepcopy
import scrapy
import re


class BianlianSpider(scrapy.Spider):
    """Spider for the ebnew.com (必联网) bidding/tender search.

    Crawl flow:
      start_requests -> POST the search form once per keyword (page 1)
      parse_start    -> read the pager to find the page count, parse page 1
                        inline, then request pages 2..N
      parse          -> extract one summary record per result row and follow
                        its detail link
      parse_page2    -> fill project code / industry from the detail page and
                        yield the finished record
    """

    name = 'bianlian'
    allowed_domains = ['ebnew.com', 'ss.ebnew.com']

    # Search keywords; one paginated search is issued per keyword.
    keyword_s = ['路由器', '变压器']

    # Upper bound on result pages fetched per keyword.  Kept at 2 to preserve
    # the original test limit; raise it for a full crawl.
    page_cap = 2

    # Single source of truth for the search endpoint (the original mixed
    # http:// and https:// for the same URL).
    SEARCH_URL = 'http://ss.ebnew.com/tradingSearch/index.html'

    # Template for one scraped record; deep-copied for every result row.
    sql_data = dict(
        projectcode='',  # project number
        web='',  # source site (e.g. 必联网)
        keyword='',  # search keyword that found this record
        detail_url='',  # URL of the tender detail page
        title='',  # title as published on the third-party site
        toptype='',  # information type
        province='',  # province the tender belongs to
        product='',  # product category
        industry='',  # industry the tender belongs to
        tendering_manner='',  # tendering method
        publicity_date='',  # tender publication date
        expiry_date='',  # tender deadline
    )

    # Search form template; only 'key' and 'currentPage' vary per request.
    FormData = dict(
        infoClassCodes='',
        rangeType='',
        projectType='bid',
        fundSourceCodes='',
        dateType='',
        startDateCode='',
        endDateCode='',
        normIndustry='',
        normIndustryName='',
        zone='',
        zoneName='',
        zoneText='',
        key='',
        pubDateType='',
        pubDateBegin='',
        pubDateEnd='',
        sortMethod='',
        timeDes='',
        orgName='',
        currentPage='',
    )

    def start_requests(self):
        """Issue the first search-results page for every keyword."""
        for keyword in self.keyword_s:
            form_data = deepcopy(self.FormData)
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse_start,
            )
            # Carry the submitted form along so later pages can re-submit it
            # with only the page number changed.
            request.meta['form_data'] = form_data
            yield request

    def parse_start(self, response):
        """Find the page count, parse page 1, then request pages 2..N."""
        page_list_x = response.xpath(
            '//form[@id="pagerSubmitForm"]/a/text()').extract()
        # default=1 guards against a result set with no pager links at all
        # (the original max() would raise ValueError on an empty sequence).
        page_max = max(
            (int(page) for page in page_list_x if re.match(r'\d+', page)),
            default=1,
        )
        page_max = min(page_max, self.page_cap)
        # BUG FIX: the original did `self.parse(response)` and discarded the
        # returned generator, so the items on page 1 were never yielded.
        yield from self.parse(response)
        for page in range(2, page_max + 1):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url=self.SEARCH_URL,
                formdata=form_data,
                callback=self.parse,
            )
            request.meta['form_data'] = form_data
            yield request

    def parse(self, response):
        """Extract summary fields from one results page; follow detail links."""
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        content_x_s = response.xpath(
            '//div[@class="abstract-box mg-t25 ebnew-border-bottom mg-r15"]')
        for content_x in content_x_s:
            sql_data = deepcopy(self.sql_data)
            sql_data['toptype'] = content_x.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = content_x.xpath('./div[1]/a/@title').extract_first()
            sql_data['publicity_date'] = content_x.xpath('./div[1]/i[2]/text()').extract_first()
            if sql_data['publicity_date']:
                # Keep only digits and dashes, e.g. "2020-01-01".
                sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])
            sql_data['tendering_manner'] = content_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = content_x.xpath('./div[2]/div[1]/p[2]/span[2]/@title').extract_first()
            sql_data['expiry_date'] = content_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = content_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = '必联网'

            detail_url = content_x.xpath('./div[1]/a/@href').extract_first()
            if not detail_url:
                # Row without a detail link: nothing to follow, skip it
                # (the original would crash on Request(url=None)).
                continue
            # urljoin tolerates relative hrefs; store the absolute URL.
            sql_data['detail_url'] = response.urljoin(detail_url)
            self.logger.debug('detail page: %s', sql_data['detail_url'])

            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2,
            )
            request.meta['sql_data'] = sql_data
            yield request

    def parse_page2(self, response):
        """Fill project code and industry from the detail page; yield the record."""
        sql_data = response.meta['sql_data']
        info = '//ul[contains(@class,"ebnew-project-information")]'
        sql_data['projectcode'] = response.xpath(info + '/li[1]/span[2]/text()').extract_first()
        sql_data['industry'] = response.xpath(info + '/li[8]/span[2]/text()').extract_first()
        if not sql_data['projectcode']:
            # Fallback: scrape "项目编号: XXX" out of the raw page text.
            # response.text honours the page's declared encoding, unlike the
            # original hard-coded body.decode('utf-8').
            found = re.search(r'项目编号[:：]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})', response.text)
            sql_data['projectcode'] = found.group(1) if found else ""
        self.logger.debug('scraped record: %r', sql_data)
        yield sql_data

