from typing import Iterable

import scrapy
from bs4 import BeautifulSoup
from scrapy import Request

from SpiderPro.items import SpiderproItem


class SpiderSpider(scrapy.Spider):
    """Demo spider that fetches a fixed set of pages and parses them with BeautifulSoup.

    Note: ``start_urls`` is defined but unused because ``start_requests`` is
    overridden to iterate ``url2`` instead.
    """

    name = "sina2"
    allowed_domains = ["sina.com.cn"]
    # Unused while start_requests() is overridden; kept for reference.
    start_urls = ["https://mil.news.sina.com.cn/china/2021-06-29/doc-ikqcfnca4010361.shtml",
                  "http://slide.news.sina.com.cn/w/slide_1_86058_457928.html#p=1"]
    url2 = ('https://www.jd.com/',
            'https://www.sina.com.cn/')

    def start_requests(self) -> Iterable[Request]:
        """Yield one unfiltered request per URL in ``url2``.

        ``dont_filter=True`` (set in the helper) bypasses both the dupe filter
        and the offsite middleware, so the jd.com URL is still fetched even
        though it is outside ``allowed_domains``.
        """
        for url in self.url2:
            yield self.make_requests_from_url(url)
            # Use the spider's logger instead of print() so output goes
            # through Scrapy's logging system.
            self.logger.debug('scheduled request for %s', url)

    def parse(self, response):
        """Parse a downloaded page and yield a populated item.

        Only fields the page is guaranteed to have are extracted; missing
        elements are skipped rather than raising AttributeError.
        """
        item = SpiderproItem()
        soup = BeautifulSoup(response.body, 'lxml')
        title_tag = soup.find('title')
        if title_tag is not None:
            item['urlname'] = title_tag.text
        self.logger.debug('parsed %s', response.url)
        yield item

    def make_requests_from_url(self, url: str) -> Request:
        """Build a request that skips Scrapy's duplicate/offsite filtering.

        Named after the legacy Spider.make_requests_from_url hook (removed in
        modern Scrapy); defined locally so start_requests keeps working.
        """
        return scrapy.Request(url, dont_filter=True)

