import jsonpath
import scrapy
import execjs
# step 1 (see start_requests): read the JS file used for request encryption
import requests
import json
import re
from ..items import *
"""
目的 通过scrapy爬取网易云评论 认识到 scrapy如何发送post请求 
网易云
步骤
1把爬取评论的业务逻辑直接放到 parse方法里面 
2需要把start_urls注释掉 
3重写发送请求的方法   def start_requests(self):方法  专门发送post请求的 
4scrapy框架添加cookie 不能直接以键值对进行添加 
"""

class WyySpider(scrapy.Spider):
    """Scrape NetEase Cloud Music song comments.

    The comment API requires an encrypted POST body, so this spider:
      * overrides ``start_requests`` to issue a POST (scrapy defaults to GET),
      * calls the site's own JavaScript (via execjs) to encrypt the payload,
      * supplies cookies as a dict, since scrapy does not accept a raw
        Cookie header string.
    """
    name = 'wyy'
    allowed_domains = ['music.163.com']
    # start_urls is intentionally absent: start_requests() below issues
    # the initial (POST) request itself.
    # start_urls = ['http://xx.com/']

    # Path to the JS file implementing the site's parameter encryption.
    # NOTE(review): hard-coded absolute path — will break on any other
    # machine; consider moving to settings or an env var.
    JS_PATH = r"D:\六星教育\2203期\2022_3_python\23.scrapy中间件使用\wangyiyun\wangyiyun\spiders\网易云.js"

    @staticmethod
    def _parse_cookie_header(raw):
        """Turn a raw ``Cookie`` header string into a name->value dict.

        Splits each pair on the FIRST '=' only, so cookie values that
        themselves contain '=' are preserved intact; malformed pairs
        without '=' are skipped rather than raising.
        """
        cookie_dict = {}
        for pair in raw.split("; "):
            key, sep, value = pair.partition("=")
            if sep:
                cookie_dict[key] = value
        return cookie_dict

    def start_requests(self):
        """Build and yield the encrypted POST request for the comment API.

        This is scrapy's fixed entry-point hook; do not rename it.
        """
        start_url = "https://music.163.com/weapi/comment/resource/comments/get?csrf_token="
        # Plaintext request parameters expected by the comment endpoint
        # (song thread id, paging info, CSRF token).
        d = {"rid": "R_SO_4_1826307499",
             "threadId": "R_SO_4_1826307499",
             "pageNo": 4,
             "pageSize": 40,
             "cursor": "1650562275327",
             "offset": 60,
             "orderType": "1",
             "csrf_token": ""}
        da = json.dumps(d)
        # 1. Read the site's encryption JS.
        with open(self.JS_PATH, "r", encoding="utf-8") as file1:
            jsdata = file1.read()
        # 2. Compile the JS source into an execjs executor object.
        exe = execjs.compile(jsdata)
        # 3. Invoke the JS ``get`` function; the second argument of
        #    exe.call is passed through to that function.
        result = exe.call("get", da)
        # 4. The JS returns the two encrypted form fields the API expects.
        data = {
            "params": result["encText"],
            "encSecKey": result["encSecKey"]
        }
        # Session cookies captured from a logged-in browser session.
        # NOTE(review): these expire; replace with fresh values when the
        # request starts failing.
        cookies = "NTES_P_UTID=Yklzz9TS3OquVzILtYxhaNDpNc5YrLBC|1646749579; P_INFO=jiuhaoyyds@163.com|1646749579|0|mail163|00&99|null&null&null#sic&510100#10#0#0|&0|mail163|jiuhaoyyds@163.com; nts_mail_user=jiuhaoyyds@163.com:-1:1; _ntes_nnid=eb55f8bc260dc3ecc0eb8f07db5b4aa5,1646749584744; _ntes_nuid=eb55f8bc260dc3ecc0eb8f07db5b4aa5; NMTID=00OAs-1hi1-5wcgFU3HlA2gKhqUwx8AAAF_aeyKoQ; WNMCID=iyzbho.1646749584881.01.0; WEVNSM=1.0.0; WM_TID=nNtS2GTdkAlFBBBQBQZv7ynH%2FfCQm%2F3T; _iuqxldmzr_=32; WM_NI=L1WirorGlM3qC8emwZDrCpUM%2BSOt1nSSC%2FdntNYsCVrQwovMKHEoGLF9yZSySWtlgio8MG9%2BynX3Cy%2BW1v9EDdoomeBnAs6LUoVCrebgnDvxIhB77mtWIrp%2BDg1cLNgHUHE%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee84f24bb5b6bab4cc609c9a8aa2d14f829b8b87d14f98869bd7d970a3b88197b62af0fea7c3b92a90ef998af165b299a4aeb27b81e8ab8ef65288ae8d91d847e98d8195b466918d8dd0e766b8929794c25eb28fb990f54789988b90c64886e7b7bbd2739189bc8de663ade883a8bc21bbb298d4c273e989ff97bc4f88bca190f06f82b28f94b54897b19aa4f16fa2b9a2a5f5748bb88ed8b667b8898793f32188b9ff84cf46a5989ba9c837e2a3; JSESSIONID-WYYY=c8256qOYjFrxpSGhXPrAV8sZ3s1dIAVHsl0kjy3H%5CkzPKx732hI9XI1fsWiWTi%2BoixQu8G6YViCV%5C%5Ce6hM0jui1lRKiPh1PeRc935bTPNCs9MAxbxYyRXFYRnv6%5C7%2BloAEoC99snZYw28sKloWnc49JUq%2F2FpI3k8xWpQMrbkypev8%5CR%3A1650630185037"
        cookie_dict = self._parse_cookie_header(cookies)
        # scrapy.FormRequest sends a POST; the response is handled by parse.
        yield scrapy.FormRequest(url=start_url, formdata=data, callback=self.parse, cookies=cookie_dict)

    def parse(self, response):
        """Extract every comment body from the JSON API response.

        Returns a WangyiyunItem whose ``content`` field is the list of all
        "content" values found anywhere in the response tree (jsonpath
        ``$..content``), or False if none matched (jsonpath's convention).
        """
        item = WangyiyunItem()
        data = json.loads(response.text)
        item["content"] = jsonpath.jsonpath(data, "$..content")
        return item

