# -*- coding: utf-8 -*-
from spider.models import CurrListedCorp
from scrapy import Request
import datetime
from scrapy.spiders import CrawlSpider
from spider.db import Session
from spider.items import CorpTypeItem


class CnInfoComCnBalanceSpider(CrawlSpider):
    """Scrape company profile attributes (organisation type) from 163.com.

    For every currently listed corporation in the database, fetches its
    company-profile page and extracts the columns listed in
    ``balanceSheetColumn`` into a :class:`CorpTypeItem`.
    """
    name = "type"
    # allowed_domains entries must be bare host names; the original value
    # carried a trailing slash ('quotes.money.163.com/'), which is not a
    # valid domain and breaks Scrapy's offsite filtering for this host.
    allowed_domains = ['quotes.money.163.com']

    # Maps the on-page Chinese label to the item field it populates.
    # (u'组织形式' = "organisation form/type" — the key is page text and
    # must stay byte-identical to what the site renders.)
    balanceSheetColumn = {
        u'组织形式': 'corp_type'
    }

    # Company-profile page URL template; {} is replaced by the stock code.
    balanceSheetUrl = 'http://quotes.money.163.com/f10/gszl_{}.html#01f02'

    # Script entry point
    def start_requests(self):
        """Yield one request per listed corporation.

        Reads stock codes from ``CurrListedCorp`` and guarantees the DB
        session is closed even if the generator is abandoned early.
        """
        session = Session()
        try:
            # Fetch the list of currently listed corporation codes.
            stock_cd_list = session.query(
                CurrListedCorp.stock_cd
            ).all()

            for row in stock_cd_list:
                # .query(Column).all() returns 1-tuples; unpack the code.
                stock_cd = row[0]
                yield Request(
                    url=self.balanceSheetUrl.format(stock_cd),
                    meta={'stock_cd': stock_cd},
                    callback=self.parsebalance
                )
        finally:
            session.close()

    # Parse the profile table: locate label/value cells via xpath.
    def parsebalance(self, response):
        """Extract mapped profile columns from the response into an item.

        Pairs the label cells (``td_label``) with the value cells
        (``td_width160``) positionally, then copies recognised columns
        into a :class:`CorpTypeItem` tagged with the stock code and the
        scrape date (YYYYMMDD).
        """
        sel = response.selector

        arr_title = sel.xpath('//td[@class="td_label"]/text()').extract()
        arr_value = sel.xpath('//td[@class="td_width160"]/text()').extract()
        # Positional pairing assumes both node lists line up row by row.
        arr_res = dict(zip(arr_title, arr_value))

        item = CorpTypeItem()
        item['stock_cd'] = response.meta['stock_cd']
        item['scrapy_time'] = datetime.datetime.now().strftime('%Y%m%d')

        # .items() instead of the Python-2-only .iteritems(); works on
        # both Python 2 and 3. The stray `print item` debug statement
        # (Python-2-only syntax) was removed — items are observable via
        # Scrapy's item pipeline / logging instead.
        for title, value in arr_res.items():
            if title in self.balanceSheetColumn:
                # Values shorter than 4 characters are province info, not
                # the organisation type — drop the whole item (original
                # behaviour). NOTE(review): `continue` may have been the
                # real intent here; confirm against the item pipeline.
                if len(value) < 4:
                    return

                item[self.balanceSheetColumn[title]] = value.strip()
        return item

