from typing import Iterable

import scrapy
from myspider.items import MyspiderItem
from scrapy import Request


class ItcastSpider(scrapy.Spider):
    """Spider that scrapes teacher name/title/description entries from
    the itheima teacher listing page and yields them as MyspiderItem.
    """

    name = "itcast"
    # Domains the offsite middleware allows follow-up requests for.
    # start_urls points at itheima.com, so it must be listed here too,
    # otherwise any follow-up request would be dropped as offsite.
    allowed_domains = ["itcast.cn", "itheima.com"]
    start_urls = ["https://www.itheima.com/teacher.html?zc-dh"]

    def start_requests(self):
        """Issue the initial request with session cookies attached.

        Overridden because Scrapy only attaches cookies supplied via the
        per-request ``cookies`` argument, not from spider attributes.
        """
        url = self.start_urls[0]
        # NOTE(review): hard-coded session cookies (SESSDATA, bili_jct, ...)
        # are credentials; they belong in settings or an environment
        # variable, not in source control.
        temp = 'buvid3=A76B95EA-59A2-29DE-AEFE-C4D4C8BD3C9C91991infoc; b_nut=1709264291; i-wanna-go-back=-1; b_ut=7; _uuid=92CFB327-9B310-5B74-AA92-BB76A10A91F2E92288infoc; enable_web_push=DISABLE; DedeUserID=226772280; DedeUserID__ckMd5=d079a3c1dc3371ab; rpdid=|(k|k)J)mJ)J0J\'u~|mkYRYY~; header_theme_version=CLOSE; FEED_LIVE_VERSION=V_DYN_LIVING_UP; PVID=1; CURRENT_QUALITY=80; buvid4=D26B0479-1F96-CF30-1B04-E22E343A84BA61299-022081410-fy711ESOQ24ymN27r3s3KA%3D%3D; buvid_fp_plain=undefined; fingerprint=972395f1a2e69c35c4a77fb2d42a84eb; buvid_fp=6e7b4f434596b5b5c444b837cadd7604; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MzAyODAzMzEsImlhdCI6MTczMDAyMTA3MSwicGx0IjotMX0.hBIfJPglK6zeNfJ1C4V8PruaZeOleEFCm8JX26dWf8M; bili_ticket_expires=1730280271; SESSDATA=cabc312e%2C1745573131%2Ce5f4f%2Aa2CjBsWMPOchJBc0Kv14HAlNmSDw4qWlE3nwLj1QAB-BBkvg_6QQxGr32dp7pBJpQO6-cSVmZxY1Rta0pWdmNUMU5nb1R0bzNwck9hRjlFQWx2Vm9GaXRIQXJFa0FkRlFuek9ndHlISEdVQ3QyaWhXTTZZNGhwRmxsLVhrUHRFODdfajBJbGYzNlVBIIEC; bili_jct=4dfa903cef3cd04e3f249a8132594b07; sid=81yr2dr0; b_lsid=3677D51C_192D094CE07; bmg_af_switch=1; bmg_src_def_domain=i1.hdslb.com; CURRENT_FNVAL=4048; bp_t_offset_226772280=993159815096172544; home_feed_column=4; browser_resolution=710-766'
        # split('=', 1): split only on the FIRST '=' so cookie values
        # that themselves contain '=' are preserved intact (the original
        # split('=')[-1] kept only the text after the LAST '=').
        cookies = dict(pair.split('=', 1) for pair in temp.split('; '))
        # Bug fix: the original passed meta={'item': item}, but `item`
        # is never defined here, raising NameError on the first yield.
        yield scrapy.Request(
            url=url,
            callback=self.parse,
            cookies=cookies,
        )

    def parse(self, response):
        """Extract one MyspiderItem per teacher node on the page.

        :param response: the downloaded page for a start URL.
        :yields: MyspiderItem with 'name' (str or None), 'title' (list),
                 'desc' (list) fields.
        """
        # Select the container that holds the h3/h4/p children.
        # Bug fix: the original selector ended in '/h2' — headings cannot
        # contain h3/h4/p children, so the relative XPaths below matched
        # nothing. TODO(review): confirm this selector against the live
        # page markup.
        node_list = response.xpath('//*[@id="mCSB_1"]/div/ul/li/div/div[2]')
        self.logger.info("found %d teacher nodes", len(node_list))
        for node in node_list:
            item = MyspiderItem()
            # extract_first() -> single string (or None) for one-value
            # results; extract() -> list for potentially multi-value ones.
            item['name'] = node.xpath('./h3/text()').extract_first()
            item['title'] = node.xpath('./h4/text()').extract()
            item['desc'] = node.xpath('./p/text()').extract()
            yield item