"""
@FileName：hs300_stocks_spider.py
@Description：
@Author：Administrator
@Time：2024/3/1 16:52
@Copyright：©2019-2024 研发中心
"""
import json
import re

import scrapy
from lxml import etree
from scrapy.linkextractors import LinkExtractor  # 解析页面中的链接

from 爬虫.eastmoney.eastmoney.items import hs300_stocks


class hs300_stocks_spider(scrapy.Spider):  # inherits from scrapy.Spider
    """Spider that scrapes the CSI 300 (HS300) constituent table from
    Eastmoney's index page and yields one ``hs300_stocks`` item per row.
    """
    name = 'hs300_stocks'  # unique identifier for this spider
    # Optional whitelist of domains this spider is allowed to crawl;
    # anything outside this list is filtered out.
    allowed_domains = ['data.eastmoney.com']
    # Start page(s) the spider crawls when no explicit URL is given.
    start_urls = ['https://data.eastmoney.com/other/index/']
    # Per-spider pipeline selection (overrides project settings).
    # BUG FIX: the original dict declared the 'ITEM_PIPELINES' key twice,
    # so the second literal silently replaced the first and ConsolePipeline
    # was never registered. A single dict registers both pipelines
    # (lower number = runs earlier).
    custom_settings = {
        'ITEM_PIPELINES': {
            '爬虫.eastmoney.eastmoney.pipelines.MysqlPipeline': 200,
            '爬虫.eastmoney.eastmoney.pipelines.ConsolePipeline': 300,
        },
    }

    @staticmethod
    def _first_text(texts, default=''):
        """Return the first stripped string from an XPath text() result
        list, or *default* when the query matched nothing."""
        return texts[0].strip() if texts else default

    def parse(self, response):
        """Parse the index page's data table.

        Args:
            response: the Scrapy HTTP response for a start URL.

        Yields:
            hs300_stocks: one populated item per <tr> in the table body.
        """
        html = etree.HTML(response.text)
        # Each <tr> of the data table is one stock row.
        rows = html.xpath('//div[@class="dataview-body"]/table/tbody/tr')
        for row in rows:
            cols = row.xpath('.//td')
            # BUG FIX: build a fresh item per row. The original reused a
            # single item instance created before the loop, so every
            # yielded reference pointed at the same (last-mutated) object.
            item = hs300_stocks()
            # BUG FIX: xpath() returns a list; extract the first text node
            # so scalar strings (not one-element lists) reach the
            # pipelines. NOTE(review): confirm the pipelines expect
            # scalars, not lists.
            item['stock_code'] = self._first_text(cols[1].xpath('.//a/text()'))
            item['stock_name'] = self._first_text(cols[2].xpath('.//a/span/text()'))
            item["close_price"] = self._first_text(cols[3].xpath('.//span/text()'))
            item["main_industry"] = self._first_text(cols[5].xpath('.//span/text()'))
            item["region"] = self._first_text(cols[6].xpath('.//span/text()'))
            # Not present in this table view; filled with placeholder.
            item["weight"] = 0
            item["earnings_per_share"] = self._first_text(cols[7].xpath('./text()'))
            item["net_asset_per_share"] = self._first_text(cols[8].xpath('./text()'))
            # Not present in this table view; filled with placeholder.
            item["net_assets"] = 0
            item["return_rate"] = self._first_text(cols[9].xpath('./text()'))
            item["total_shares"] = self._first_text(cols[10].xpath('./text()'))
            item["circulation_shares"] = self._first_text(cols[11].xpath('./text()'))
            item["circulation_market_value"] = self._first_text(cols[12].xpath('./text()'))
            item["related_info"] = ""
            yield item
