# -*- coding: utf-8 -*-
import scrapy
import json


class SnowballSpider(scrapy.Spider):
    """Spider that POSTs a JSON payload to Xueqiu's public-timeline API.

    A single POST request is issued from :meth:`start_requests`; the
    endpoint returns JSON and :meth:`parse` is currently a placeholder.
    """

    name = 'snowball'
    allowed_domains = ['xueqiu.com']
    # NOTE(review): removed the dead 'https://www.baidu.com' entry — it was
    # never requested (only start_urls[0] is used below) and it contradicted
    # allowed_domains.
    start_urls = ['https://xueqiu.com/v4/statuses/public_timeline_by_category.json']

    def start_requests(self):
        """Yield one POST request with a JSON body to the timeline endpoint."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0",
            # The request body below is JSON; without this header the server
            # would interpret the payload as URL-encoded form data.
            "Content-Type": "application/json",
        }
        # Session cookies required by the site (captured from a browser).
        cookies = {
            'uuid': '66a0f5e7546b4e068497.1542881406.1.0.0',
            '_lxsdk_cuid': '1673ae5bfd3c8-0ab24c91d32ccc8-143d7240-144000-1673ae5bfd4c8',
            '__mta': '222746148.1542881402495.1542881402495.1542881402495.1',
            'ci': '20',
            'rvct': '20%2C92%2C282%2C281%2C1',
            '_lx_utm': 'utm_source%3DBaidu%26utm_medium%3Dorganic',
            '_lxsdk_s': '1674f401e2a-d02-c7d-438%7C%7C35',
        }
        # Request payload. NOTE(review): parameter semantics assumed —
        # sin_id=-1 presumably means "start from newest", category=6 selects
        # a timeline category; confirm against the API.
        formdata = {
            'sin_id': -1,
            'category': 6,
        }
        body = json.dumps(formdata)

        # dont_filter=True disables Scrapy's duplicate-request filter (the
        # original comment wrongly described it as domain filtering).
        yield scrapy.Request(
            self.start_urls[0],
            method="POST",
            headers=headers,
            cookies=cookies,
            body=body,
            callback=self.parse,
            dont_filter=True,
        )

    def parse(self, response):
        """Placeholder callback — only proves the request round-tripped."""
        print(111)
