import scrapy
import re

class RenrenSpider(scrapy.Spider):
    """Spider that accesses renren.com pages by replaying a logged-in
    browser session's cookies instead of performing a form login."""

    name = "renren"
    allowed_domains = ["renren.com"]
    # Personal profile page; only renders fully for an authenticated session.
    start_urls = ["http://www.renren.com/327550029/profile"]    # fixed typo: "profiel" -> "profile"

    def start_requests(self):
        """Override the parent class's start_requests to attach the session
        cookies to the very first request.

        Yields:
            scrapy.Request: the profile-page request carrying login cookies.
        """
        # Raw Cookie header copied verbatim from a logged-in browser session.
        cookies = "anonymid=k49uhxdk-bvmpxa; depovince=GW; jebecookies=9e1cc81d-f807-44b4-a34f-229c2b5616b4|||||; _r01_=1; JSESSIONID=abcmniNAmgubj6KnQ1t8w; ick_login=da2b6b75-966c-412a-be08-e05127859d97; _de=1EDB9396C3D240E5"
        # Convert the "k=v; k=v" header string into a dict.
        # split("=", 1) keeps values intact even when they contain '='
        # (e.g. base64 padding) — plain split("=") would truncate them.
        cookies = {
            key: value
            for key, value in (pair.split("=", 1) for pair in cookies.split("; "))
        }
        yield scrapy.Request(
            self.start_urls[0],
            callback=self.parse,
            cookies=cookies,
        )
        # NOTE: passing the cookie string via headers={"Cookie": ...} does NOT
        # work — Scrapy's cookie middleware manages cookies separately, so
        # they must go through the `cookies=` argument as above.

    def parse(self, response):
        """Check the profile page for the account owner's name — it only
        appears in the body when the cookie-based login succeeded."""
        print(re.findall("毛沼郡", response.body.decode()))
        yield scrapy.Request(
            "http://......",  # placeholder: personal-details page URL — TODO fill in
            callback=self.parse_detail,
        )

    def parse_detail(self, response):
        """Repeat the logged-in check on the details page, proving that the
        session cookies were carried over to the follow-up request."""
        print("*" * 100)
        print(re.findall("毛沼郡", response.body.decode()))

"""
注意1：关掉robots协议
注意2：settings.py中COOKIES_ENABLED 默认为True（开启），下次的请求会带上上次的cookie
注意3：settings.py中添加参数：COOKIES_DEBUG = TRUE——能够看见不同请求传递的具体过程
-------------------------------------------------------
"""