#   ClassName:  news_daily
#   description:
#           沪深300公告大全
#   @ author:郭海龙
#   @ Create：2024/4/7 21:40
#   @ Version:1.0

import json
import scrapy
from bs4 import BeautifulSoup
from 爬虫.eastmoney.eastmoney.items import news_daily


class news_daily_spider(scrapy.Spider):
    """Spider collecting CSI 300 company announcements from data.eastmoney.com.

    The listing page embeds its data as a JSON array inside a ``var pagedata``
    <script> tag; ``parse`` extracts that array and yields one ``news_daily``
    item per announcement.
    """
    name = 'news_daily'
    allowed_domains = ['data.eastmoney.com']
    start_urls = ['https://data.eastmoney.com/notices/']
    # BUG FIX: the original dict defined the key 'ITEM_PIPELINES' twice, so
    # Python silently discarded the first entry and ConsolePipeline was never
    # enabled. Both pipelines now live in one mapping (lower number = runs first).
    custom_settings = {
        'ITEM_PIPELINES': {
            '爬虫.eastmoney.eastmoney.pipelines.MysqlPipeline': 200,
            '爬虫.eastmoney.eastmoney.pipelines.ConsolePipeline': 300,
        },
    }

    def parse(self, response, **kwargs):
        """Parse the notices listing page.

        Locates the <script> tag containing ``var pagedata``, slices out the
        embedded JSON array, and yields one populated ``news_daily`` item per
        announcement record.

        :param response: Scrapy response for a ``start_urls`` page.
        :yields: ``news_daily`` items.
        """
        link_header = "https://data.eastmoney.com/notices/detail/"
        soup = BeautifulSoup(response.body, 'html.parser')
        for script in soup.find_all('script'):
            text = script.text
            if 'var pagedata' not in text:
                continue
            # The JSON payload is the span between the first '[' and the
            # matching last ']' of the script body.
            data_str = text[text.find('['):text.rfind(']') + 1]
            data = json.loads(data_str)

            for record in data:
                # BUG FIX: create a fresh item per record. The original reused
                # one mutable item instance across all yields, which corrupts
                # output when pipelines handle items asynchronously.
                item = news_daily()
                # NOTE(review): 'short_name' is the stock's short name, not a
                # news headline, despite being stored in 'title' — confirm intent.
                item['title'] = record['codes'][0]['short_name']
                stock_code = record['codes'][0]['stock_code']
                art_code = record['art_code']
                item['link'] = link_header + stock_code + '/' + art_code + '.html'  # detail-page URL
                item['publish_time'] = record['display_time']  # publish time
                item['notice_title'] = record['title']  # announcement title
                item['notice_type'] = record['columns'][0]['column_name']  # announcement type
                item['notice_date'] = record['notice_date']  # announcement date

                yield item
