# -*- coding: utf-8 -*-
import os
import uuid

import scrapy
from scrapy import Request
from scrapy.utils.project import get_project_settings
from WuSongSpider.items import WusongspiderItem
import json
import types


class WusongSpider(scrapy.Spider):
    # Spider that logs into www.itslaw.com and crawls judgement search results.
    settings = get_project_settings()
    name = 'wusong'
    allowed_domains = ['www.itslaw.com']
    # settings = get_project_settings()
    # cookie = settings['COOKIE']
    # cookie = meta={'cookiejar':1}
    # HTTP headers sent to the server; some sites require browser-like headers
    # to allow crawling, others do not.
    headers = {
        # NOTE(review): 'keep - alive' contains stray spaces — the standard
        # token is 'keep-alive'; confirm whether this is intentional.
        'Connection': 'keep - alive',  # keep the connection open between requests
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Content-Type': 'application/json;charset=UTF-8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    }
    # Per-request response-handling options.
    meta = {
        'dont_redirect': True,  # do not follow HTTP redirects
        'handle_httpstatus_list': [301, 302]  # status codes passed to the callback instead of being filtered out
    }
    # Initial URL: the site's login page.
    start_urls = ['https://www.itslaw.com/views/user/login/login.html',
                  ]

    # NOTE(review): credentials are hard-coded in plain text; consider moving
    # them into project settings or environment variables.
    formdata = {'userName': '296298319@qq.com', 'password': 'emh5MTIzNDU2', 'isAutoLogin': 'false' }

    def parse(self, response):
        """Submit the login form to the encryption login endpoint.

        Called with the response for the login page (``start_urls``).
        Reuses the credentials declared in the class-level ``formdata``
        attribute instead of duplicating them inline, and drops the dead
        commented-out ``FormRequest.from_response`` variant.

        :param response: response for the login page
        :return: a ``FormRequest`` whose result is handled by ``after_login``
        """
        return scrapy.FormRequest(
            'https://www.itslaw.com/api/v1/users/user/login/encryption',
            formdata=self.formdata,  # credentials defined once on the class
            callback=self.after_login
        )

    # 获取数据链接
    # Check the login result and fetch account info on success
    def after_login(self, response):
        """Inspect the login API's JSON reply and continue on success.

        The API signals success with ``result.code == 0``; in that case the
        account-info endpoint is requested and handled by ``parse_profile``.
        A failed login is now logged instead of being silently ignored,
        which previously made the crawl stop with no indication why.

        :param response: JSON response from the login endpoint
        """
        ob = json.loads(response.body_as_unicode())
        url = 'https://www.itslaw.com/api/v1/users/user/loginInfo'
        if ob['result']['code'] == 0:
            yield scrapy.Request(url, callback=self.parse_profile)
        else:
            # surface failed logins instead of swallowing them silently
            self.logger.warning('login failed, result: %s', ob['result'])


    def parse_profile(self, response):
        """Start the case-file search at the first result page (index 0)."""
        first_index = 0
        search_url = (
            'https://www.itslaw.com/api/v1/caseFiles?startIndex={page}'
            '&countPerPage=20&sortType=1&'
            'conditions=searchWord%2B%E6%88%BF%E4%BA%A7%2B1%2B%E6%88%BF%E4%BA%A7'
        ).format(page=first_index)
        yield scrapy.Request(search_url, callback=self.get_content)


    def get_content(self, response):
        """Print the judgement summaries on one result page, then paginate.

        Fixes two defects in the original: the detail-page ``url`` local was
        built but never used (dead code, removed), and the next-page index was
        hard-coded to 20, so every call re-requested the same second page —
        Scrapy's duplicate filter then stalled the crawl after two pages.
        The start index is now carried in ``response.meta['start_index']``;
        the first page (requested by ``parse_profile`` without meta) defaults
        to 0, so the first follow-up request is still index 20.

        :param response: JSON response from the caseFiles search endpoint
        """
        contentList = json.loads(response.body_as_unicode())
        judgements = contentList['data']['searchResult']['judgements']
        for ob in judgements:
            print(ob['id'])
            print('标题:' + ob['title'])  # title
            print('案件类型:' + ob['caseType'])  # case type
            print('文书性质:' + ob['judgementType'])  # document type
            print('审理法院:' + ob['courtName'])  # trial court
            print('案　　号:' + ob['caseNumber'])  # case number
            print('裁判日期:' + ob['judgementDate'])  # judgement date
        # Stop paginating once a page comes back empty.
        if not judgements:
            return
        next_index = response.meta.get('start_index', 0) + 20
        next_url = 'https://www.itslaw.com/api/v1/caseFiles?startIndex={page}&countPerPage=20&sortType=1&' \
              'conditions=searchWord%2B%E6%88%BF%E4%BA%A7%2B1%2B%E6%88%BF%E4%BA%A7'.format(page=next_index)
        yield scrapy.Request(next_url, callback=self.get_content,
                             meta={'start_index': next_index})


    def get_deal(self, response):
        contentList = json.loads(response.body_as_unicode())