import json
import re
# Authors: 万茂洁, 朱易
import scrapy
from lxml import etree
from scrapy.linkextractors import LinkExtractor  # 解析页面中的链接

from 爬虫.eastmoney.eastmoney import items
from 爬虫.eastmoney.eastmoney.items import money_flow_daily


class money_flow_daily_spider(scrapy.Spider):  # subclass of scrapy.Spider
    """Spider that scrapes daily main-capital money-flow data for the
    Shanghai Composite index (zs000001) from data.eastmoney.com and yields
    one ``money_flow_daily`` item per page."""

    name = 'money_flow_daily'  # unique identifier for this spider
    allowed_domains = ['data.eastmoney.com']  # restrict crawling to this domain only
    start_urls = ['https://data.eastmoney.com/zjlx/zs000001.html']  # crawl start page
    # Per-spider pipeline override (otherwise every pipeline declared in
    # settings.py would run).
    # BUG FIX: the original dict literal assigned the 'ITEM_PIPELINES' key
    # twice, so the ConsolePipeline entry was silently discarded. Both
    # pipelines belong in ONE dict, keyed by priority (lower runs first).
    custom_settings = {
        'ITEM_PIPELINES': {
            '爬虫.eastmoney.eastmoney.pipelines.MysqlPipeline': 200,
            '爬虫.eastmoney.eastmoney.pipelines.ConsolePipeline': 300,
        },
    }

    def parse(self, response):
        """Parse the money-flow page and yield one populated item.

        :param response: scrapy Response for a start_urls page.
        :returns: generator yielding a single ``money_flow_daily`` item whose
            fields hold lists of text nodes extracted via XPath.
        """
        item = money_flow_daily()  # one Item instance holds one record
        html = etree.HTML(response.text)
        # Extract table data. Each xpath() call returns a list of text nodes.
        item['net_amount'] = html.xpath("//div[@class='dataview']//tr/td[4]/text()")  # main-capital net inflow amount
        item['net_percentage'] = html.xpath("//div[@class='dataview']//tr/td[5]/text()")  # main-capital net inflow percentage
        # NOTE(review): the original XPath here was "//tr//td[3]" with no row
        # index, which matches td[3] of EVERY row; tr[1] follows the pattern of
        # the rows below — confirm against the live page markup.
        item["main_net_inflow"] = html.xpath("//table[@class='table1']//tr[1]//td[3]/text()")  # today's main-capital net inflow
        item["large_net_inflow"] = html.xpath("//table[@class='table1']//tr[2]//td[3]/text()")  # today's extra-large-order net inflow
        item["big_net_inflow"] = html.xpath("//table[@class='table1']//tr[3]//td[3]/text()")  # today's large-order net inflow
        item["medium_net_inflow"] = html.xpath("//table[@class='table1']//tr[4]//td[3]/text()")  # today's medium-order net inflow
        item["small_net_inflow"] = html.xpath("//table[@class='table1']//tr[5]//td[3]/text()")  # today's small-order net inflow
        item["related_info"] = ""
        # BUG FIX: the original had `yield items`, which yielded the imported
        # `items` MODULE instead of the populated item object, so no scraped
        # data ever reached the pipelines.
        yield item