# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from bs4 import BeautifulSoup
from scrapy_redis.spiders import RedisSpider

from example.items import youyuanItem

class YouyuanSpider(RedisSpider):
    """Redis-driven spider for bid-award notices on new.zmctc.com.

    Start URLs are pushed to the Redis list named by ``redis_key`` (e.g.
    ``http://new.zmctc.com/zjgcjy/jyxx/004010/004010001/?Paging=1``);
    ``parse`` walks each listing page and ``parse_detail`` extracts the
    award record (winner, project manager, price) from the notice page.
    """
    name = 'youyuan'
    # allowed_domains = ['new.zmctc.com']
    # start_urls = ['http://new.zmctc.com/zjgcjy/jyxx/004010/004010001/?Paging=1']

    redis_key = "youyuanspider:start_urls"

    def __init__(self, *args, **kwargs):
        # Supports `scrapy crawl youyuan -a domain=a.com,b.com`.
        domain = kwargs.pop('domain', '')
        # BUG FIX: filter() returns a lazy iterator on Python 3, which is
        # exhausted after a single pass — Scrapy's OffsiteMiddleware expects
        # allowed_domains to be a (re-iterable) list.
        self.allowed_domains = [d for d in domain.split(',') if d]
        super(YouyuanSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        """Parse a listing page: yield one detail-page request per row,
        then requests for the next listing pages."""
        soup = BeautifulSoup(response.text, 'lxml')
        # The listing rows live in the 11th <table>, rows with height="30"
        # — layout-dependent; breaks if the site markup changes.
        tr_list = soup.find_all('table')[10].find_all('tr', height="30")
        for tr in tr_list:
            td_list = tr.find_all('td')  # hoisted: was queried three times per row
            link = td_list[1].find('a')
            detail_url = "http://new.zmctc.com" + link['href']
            project_name = link['title']
            # Publication date is rendered as "[YYYY-MM-DD]".
            pubtime = td_list[2].text.replace("[", "").replace("]", "").strip()

            yield scrapy.Request(
                detail_url,
                callback=self.parse_detail,
                meta={"pro_name": project_name, "pro_date": pubtime},
            )

        # Follow listing pages 2 and 3; the scheduler's dupe filter drops
        # the repeats yielded when those pages are themselves parsed.
        for page in range(2, 4):
            page_url = 'http://new.zmctc.com/zjgcjy/jyxx/004010/004010001/?Paging=' + str(page)
            yield scrapy.Request(page_url, callback=self.parse)

    def parse_detail(self, response):
        """Extract the award record from a notice detail page and yield
        it as a youyuanItem (only when a winning company was found)."""
        item = youyuanItem()
        pro_name = response.meta['pro_name']
        pro_date = response.meta['pro_date']

        soup = BeautifulSoup(response.text, 'lxml')
        money_pattern = re.compile(r'[0-9]+\.[0-9]+|[0-9]+')
        com_name = ""
        ar_name = ""
        money = 0

        # The award table is the 13th <table>. Label cells ("中标单位",
        # "项目经理", "中标价") and their values sit in separate rows, so once
        # a label row is found the values are read from the next 1..3 rows
        # at the same column index.
        tr_list = soup.find_all('table')[12].find_all('tr')
        n_rows = len(tr_list)
        for i, tr in enumerate(tr_list):
            if "项目编号" in tr.text:
                # Parsed but not stored on the item — TODO confirm whether
                # youyuanItem should carry a registration-number field.
                registration_num = tr.find_all('td')[1].text.strip()
            if "项目名称" in tr.text:
                pro_name = tr.find_all('td')[1].text.strip()

            if "中标单位" in tr.text:
                td_list = tr.find_all('td')
                for k in range(1, 4):
                    if i + k >= n_rows:
                        break  # ROBUSTNESS: don't index past the table
                    value_tds = tr_list[i + k].find_all('td')
                    for j, td in enumerate(td_list):
                        if j >= len(value_tds):
                            break  # ROBUSTNESS: value row has fewer cells than the label row
                        if "中标单位" in td.text:
                            com_name = value_tds[j].text.strip()
                        if "项目经理" in td.text:
                            ar_name = value_tds[j].text.strip()
                            # A plausible Chinese personal name is 2-4 chars;
                            # anything else is noise from the table.
                            if len(ar_name) > 4 or len(ar_name) < 2:
                                ar_name = ""
                        if "中标价" in td.text:
                            found = money_pattern.findall(value_tds[j].text)
                            # ROBUSTNESS: was findall(...)[0] — IndexError
                            # on a price cell with no digits.
                            if found:
                                money = float(found[0]) / 10000.0  # yuan -> 10k yuan
                                if money < 1:
                                    money = 0  # sub-1万 figures treated as garbage

                    # "公告开始时间" two rows below marks the end of the
                    # award section (bounds-checked: was an unguarded index).
                    if i + k + 2 < n_rows and "公告开始时间" in tr_list[i + k + 2].text:
                        break

        if com_name != '':
            item['proname'] = pro_name
            item['comname'] = com_name
            item['prodate'] = pro_date
            item['arname'] = ar_name
            item['money'] = money
            item['oriurl'] = response.url

            yield item


