# coding=UTF-8
'''
    百度股市通爬虫
'''
import scrapy
from baidustock.items import BaiduItem
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst
from baidustock.models.stock_industry_table import StockIndustryTable


class BaiduStockSpider(scrapy.Spider):
    '''
        Spider that crawls per-stock quote pages from Baidu Gupiao
        (https://gupiao.baidu.com) and yields one BaiduItem per stock.
    '''

    # Spider name used by `scrapy crawl`.
    name = 'baidu_stock'

    # URL template for one stock's quote page; {id} is the
    # exchange-prefixed stock ID (e.g. 'sh600000' / 'sz000001').
    list_url = (
        'https://gupiao.baidu.com/stock/{id}.html'
    )

    # Stock IDs to crawl, loaded from the industry table.
    # NOTE(review): this query runs at class-definition (import) time —
    # confirm that side effect is intended.
    stock_id_list = StockIndustryTable.get_list()

    # Item-field name -> XPath expression; drives generate_item so the
    # extraction loop stays free of repeated add_xpath calls.
    # Keys must match the field names declared on BaiduItem
    # (including the existing 'minimun'/'maximun' spellings).
    generate_item_dict = {
        'stock_ID': '//a[@class="bets-name"]/span/text()',
        'closing_price': '//div[@class="line2"]/dl[1]/dd/text()',
        'opening_price': '//div[@class="line1"]/dl[1]/dd/text()',
        'minimun_price': '//div[@class="line2"]/dl[3]/dd/text()',
        'maximun_price': '//div[@class="line1"]/dl[3]/dd/text()',
        'turnover_total': '//div[@class="line1"]/dl[2]/dd/text()',
        'turnover_volume': '//div[@class="line1"]/dl[6]/dd/text()',
        'turnover_rate': '//div[@class="line2"]/dl[2]/dd/text()',
        'efamc': '//div[@class="line1"]/dl[8]/dd/text()',
        'mktcap': '//div[@class="line2"]/dl[8]/dd/text()',
    }

    def generate_url(self, id):
        '''
            Build the quote-page URL for one stock ID.

            IDs starting with '6' are Shanghai listings ('sh' prefix);
            all others are treated as Shenzhen ('sz' prefix).

            NOTE(review): the parameter name shadows the builtin `id`;
            kept to preserve the existing call interface.
        '''
        # startswith() is safe on an empty ID string, where the
        # original `id[0]` indexing would raise IndexError.
        prefix = 'sh' if id.startswith('6') else 'sz'
        return self.list_url.format(id=prefix + id)

    def start_requests(self, id_list=None):
        '''
            Generate the spider's initial requests, one per stock ID.

            :param id_list: iterable of stock IDs; defaults to the
                class-level ``stock_id_list`` when omitted.
        '''
        # None sentinel instead of a mutable class attribute as the
        # default: avoids binding the list at definition time and
        # honors per-instance overrides of stock_id_list.
        if id_list is None:
            id_list = self.stock_id_list
        for stock_id in id_list:
            yield scrapy.Request(
                self.generate_url(stock_id),
                callback=self.generate_item,
            )

    def generate_item(self, response):
        '''
            Extract the quote fields from a stock page into a BaiduItem
            via the generate_item_dict field -> XPath mapping.
        '''
        item_loader = ItemLoader(item=BaiduItem(), response=response)

        # Each XPath may match several nodes; keep only the first value.
        item_loader.default_output_processor = TakeFirst()

        # Load every field declared in the mapping.
        for field, xpath in self.generate_item_dict.items():
            item_loader.add_xpath(field, xpath)

        return item_loader.load_item()
