import scrapy
from wangyiPro.items import WangyiproItem

class WangyiSpider(scrapy.Spider):
    """Spider that scrapes headline anchor text from the 163.com front page.

    For every matched ``<a>`` element it yields a ``WangyiproItem`` whose
    ``title`` field holds the anchor text; the configured item pipelines
    receive the yielded items and handle persistent storage.
    """

    name = "wangyi"
    # allowed_domains = ["www.xxx.com"]
    start_urls = ["https://www.163.com/"]

    # Pipeline-based persistent storage: yield items instead of returning
    # plain dicts so the item pipeline performs the storage step.
    def parse(self, response):
        print("****数据转换*****")
        # xpath() returns a SelectorList; every element is a Selector object.
        a_lst = response.xpath('//*[@id="js_index2017_wrap"]/div[3]/div[1]/div[2]/ul/li[1]/a')
        for a in a_lst:
            # extract_first() returns None on an empty match, whereas the
            # previous [0].extract() raised IndexError for anchors that
            # have no text node.
            title = a.xpath('./text()').extract_first()
            if title is None:
                # Skip anchors without text content rather than crashing.
                continue

            item = WangyiproItem()
            item['title'] = title

            # Hand the item off to the item pipeline for storage.
            yield item