# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from copy import deepcopy

# Search terms (product categories, in Chinese): router, power adapter,
# capacitor, microcontroller, robotic hand.
KEYWORDS = ["路由器", "适配器", "电容器", "单片机", "机器手"]
# Template of the POST form sent to the ebnew trading-search endpoint.
# A copy is made per request; the page number and search term get filled in.
POST_DICT = {
    "infoClassCodes": '',
    "rangeType": '',
    "projectType": 'bid',
    "fundSourceCodes": '',
    "dateType": '',
    "startDateCode": '',
    "endDateCode": '',
    "normIndustry": '',
    "normIndustryName": '',
    "zone": '',
    "zoneName": '',
    "zoneText": '',
    "key": '',
    "pubDateType": '',
    "pubDateBegin": '',
    "pubDateEnd": '',
    "sortMethod": 'timeDesc',
    "orgName": '',
    "currentPage": '1',
}

# Template of one scraped record (one tender announcement); a deep copy is
# filled in per result and yielded as the spider's item.
SQL_DICT = {
    'projectcode': '',  # project number
    'web': '必联网',  # source website (the ebnew/bilian platform)
    'keyword': '',  # search keyword that produced this record
    'detail_url': '',  # URL of the tender detail page
    'title': '',  # title as published on the third-party site
    'toptype': '',  # announcement/information type
    'province': '',  # province the tender belongs to
    'product': '',  # product category
    'industry': '',  # industry the tender belongs to
    'tendering_manner': '',  # tendering method
    'publicity_date': '',  # tender publication date
    'expiry_date': '',  # tender deadline
}
# Maps the Chinese field labels shown on a detail page to SQL_DICT keys.
# '招标机构' (tendering agency) goes to the throwaway key 'dump', which the
# spider deletes before yielding the record.
CN_KEY_MAP = {
    '项目编号': 'projectcode',  # project number
    '公告类型': 'toptype',  # announcement type
    '截止时间': 'expiry_date',  # deadline
    '招标方式': 'tendering_manner',  # tendering method
    '招标机构': 'dump',  # tendering agency (discarded)
    '招标地区': 'province',  # tendering region
    '招标产品': 'product',  # tendered product
    '所属行业': 'industry',  # industry
}


class BilianSpider(scrapy.Spider):
    """Spider for the ebnew (必联网) tender platform.

    For every keyword in KEYWORDS it POSTs the search form for result
    pages 1-10, follows each announcement link, and yields one record
    (a filled-in copy of SQL_DICT) per tender detail page.
    """

    name = 'bilian'
    # 'ebnew.com' covers every subdomain (www.ebnew.com, ss.ebnew.com).
    # The original list ['ebnew', 'www.ebnew.com'] contained no valid
    # domain matching ss.ebnew.com, so the offsite middleware would have
    # dropped the search requests.
    allowed_domains = ['ebnew.com']
    # Fixed: the original value 'http://http://www.ebnew.com//' had a
    # doubled scheme and was not a usable URL.
    start_urls = ['http://www.ebnew.com/']

    def start_requests(self):
        """Issue one search POST per (keyword, page) pair, pages 1-10."""
        for keyword in KEYWORDS:
            for page in range(1, 11):
                fdata = deepcopy(POST_DICT)
                fdata['currentPage'] = str(page)
                # The endpoint expects the search term in the 'key' field
                # (the one declared in POST_DICT).  The original wrote it
                # to an unused 'keyword' field, so the term never reached
                # the server and every search ran unfiltered.
                fdata['key'] = keyword
                yield scrapy.FormRequest(
                    url='http://ss.ebnew.com/tradingSearch/index.htm',
                    formdata=fdata,
                    callback=self.digest_parse,
                    meta={'keyword': keyword})

    def digest_parse(self, response):
        """Parse one search-result page; follow each announcement link."""
        links = response.xpath(
            "//div/div/a[contains(@class,'abstract-title')]")
        for link in links:
            data = deepcopy(SQL_DICT)
            data['detail_url'] = link.xpath('./@href').extract_first()
            if not data['detail_url']:
                # Malformed result entry; nothing to follow.
                continue
            # The sibling <i> holds text like "发布日期:2019-01-01"; keep
            # the part after the first colon.  Guard against a missing
            # node, which crashed the original with AttributeError, and a
            # colon-free value, which raised IndexError.
            raw_date = link.xpath('../i[2]/text()').extract_first() or ''
            data['publicity_date'] = raw_date.split(':', 1)[-1]
            data['keyword'] = response.meta['keyword']
            yield Request(url=data['detail_url'],
                          callback=self.detail_parse,
                          meta={'data': data})

    def detail_parse(self, res):
        """Fill the record started in digest_parse from the detail page."""
        rows = res.xpath(
            '//ul[contains(@class,"ebnew-project-information")]/li')
        data = res.meta['data']
        for row in rows:
            text = ''.join(row.xpath('string(.)').extract())
            # Strip the whitespace the page uses purely for layout.
            for ch in ('\n', '\r', '\t', ' '):
                text = text.replace(ch, '')
            # Each row reads "标签：值" (full-width colon).  Split on the
            # first colon only, so values containing '：' stay intact.
            label, sep, value = text.partition('：')
            if not sep:
                continue  # no colon: not a label/value row
            # Unknown labels crashed the original with KeyError; skip them.
            field = CN_KEY_MAP.get(label)
            if field:
                data[field] = value
        data['title'] = res.xpath(
            "//h2[contains(@class,'details-title')]/text()").extract_first()
        # 'dump' holds the tendering agency, which the record schema does
        # not keep; pop() tolerates pages that omit that row (the original
        # `del` raised KeyError in that case).
        data.pop('dump', None)
        yield data