import scrapy
from bidspider.items import BidspiderItem
from scrapy.cmdline import execute
from utils.text_utils import TextUtils
import time


class CcgpSpider(scrapy.Spider):
    """Spider for 中国政府采购网 (ccgp.gov.cn) procurement announcements.

    Crawls three announcement list sections, follows every entry to its
    detail page, and yields one populated ``BidspiderItem`` per
    announcement. Pagination stops as soon as an entry's publish time
    falls outside the window accepted by
    ``TextUtils.data_time_is_available`` (list pages appear to be
    newest-first — everything after a stale entry is assumed stale too).
    """

    name = "ccgp"
    allowed_domains = ["ccgp.gov.cn"]
    start_urls = ["https://www.ccgp.gov.cn/cggg/zygg/", "https://www.ccgp.gov.cn/cggg/dfgg/",
                  "https://www.ccgp.gov.cn/xxgg/qtcgxx/"]

    # Polite per-request throttle. Replaces the original blocking
    # time.sleep(1) in parse_detail, which froze the Twisted reactor (and
    # therefore every concurrent request) for a second per detail page.
    custom_settings = {"DOWNLOAD_DELAY": 1}

    # Maps a label cell in the detail page's summary table to the item
    # field it fills. Several labels intentionally share one field
    # (e.g. 开标时间/截止时间 both mean the deadline).
    _SUMMARY_FIELDS = {
        '评审专家名单': 'experts',
        '开标时间': 'endtime',
        '截止时间': 'endtime',
        '预算金额': 'amount',
        '总中标金额': 'amount',
        '项目联系人': 'project_contact',
        '项目联系电话': 'project_phone',
        '采购单位': 'master',
        '采购单位地址': 'master_address',
        '采购单位联系方式': 'master_contact',
        '代理机构名称': 'proxy_name',
        '代理机构地址': 'proxy_address',
        '代理机构联系方式': 'proxy_contact',
    }

    def parse(self, response):
        """Parse a list page: queue detail requests, then the next page.

        :param response: list-page response for one of ``start_urls``
        :yields: ``scrapy.Request`` for each detail page (carrying a
                 partially-filled item in ``meta['item']``) and, unless a
                 stale entry was seen, one request for the next list page.
        """
        self.logger.info("正在采集列表：%s", response.url)
        can_next_page = True
        for li in response.xpath("//ul[@class='c_list_bid']/li"):
            href = li.xpath("./a/@href").extract_first()
            title = li.xpath("./a/text()").extract_first()
            ems = li.xpath("./em")
            btype = ctime = city = master = ''
            # Some sections include a leading announcement-type <em>,
            # others start straight at the publish time.
            if len(ems) >= 4:
                btype, ctime, city, master = (
                    em.xpath("./text()").extract_first() for em in ems[:4])
            elif len(ems) == 3:
                ctime, city, master = (
                    em.xpath("./text()").extract_first() for em in ems)
            # Entry too old: stop this page AND pagination (see class doc).
            if not TextUtils.data_time_is_available(ctime, '%Y-%m-%d %H:%M'):
                can_next_page = False
                break
            item = BidspiderItem()
            item['title'] = title
            item['link'] = response.urljoin(href)
            item['btype'] = btype
            item['ctime'] = ctime
            item['cityname'] = city
            item['master'] = master
            item['source'] = '中国政府采购网'
            yield scrapy.Request(url=item['link'], meta={'item': item},
                                 callback=self.parse_detail)
        # Follow pagination only while every entry on this page was fresh.
        if can_next_page:
            next_href = response.xpath(
                "//p[@class='pager']/a[contains(text(), '下一页')]/@href"
            ).extract_first()
            if next_href:
                yield scrapy.Request(url=response.urljoin(next_href),
                                     callback=self.parse)

    def parse_detail(self, response):
        """Fill the item carried in ``meta`` from the detail page.

        :param response: detail-page response
        :returns: the completed ``BidspiderItem``
        """
        self.logger.info("正在采集详情：%s", response.url)
        item = response.meta['item']
        # Prefer the detail-page headline, but keep the list-page title
        # when the headline is missing (the original code clobbered the
        # title with None in that case).
        title = response.xpath(
            "//div[@class='vF_detail_header']/h2/text()").extract_first()
        if title:
            item['title'] = title
        # Raw announcement body, normalized by the shared formatter.
        content = response.xpath(
            "//div[@class='vF_detail_content']").extract_first()
        item['content'] = TextUtils.format_content(content, response)
        # Copy recognized labels from the summary table into item fields.
        for tr in response.xpath("//div[@class='table']/table/tr"):
            tds = tr.xpath("./td")
            if len(tds) < 2:
                continue
            tkey = tds[0].xpath("./text()").extract_first()
            tval = tds[1].xpath("./text()").extract_first()
            field = self._SUMMARY_FIELDS.get(tkey)
            if field:
                item[field] = tval
        return item
