import json
import random
import re
from chaojidddd import parse_base64
from urllib.parse import unquote

import scrapy
from bs4 import BeautifulSoup


class InformationSpider(scrapy.Spider):
    """Crawl lianjia.com city listings.

    The site answers with a 301/302 redirect into a captcha flow
    (captcha.lianjia.com / hip.lianjia.com) when it challenges the client.
    Redirect following is disabled so the ``Location`` header can be parsed
    manually, the captcha puzzle solved via ``parse_base64``, and the
    original URL retried after validation.

    Instance state set during the captcha flow:
        self.token    -- random 32-char session token (see get_token)
        self.location -- URL-encoded original target extracted from the redirect
        self.ext      -- opaque ``ext`` value extracted from the redirect
        self.base64   -- base64 payload of the captcha puzzle resource
    """

    name = "information"
    custom_settings = {
        'REDIRECT_ENABLED': False  # disable automatic redirects; we parse Location ourselves
    }
    # Let redirect statuses reach our callbacks instead of being filtered by
    # HttpErrorMiddleware. BUG FIX: 301 was missing although parse_city()
    # explicitly branches on response.status == 301.
    handle_httpstatus_list = [301, 302]

    # Character alphabet the site's JS uses when minting a captcha token.
    TOKEN_ALPHABET = "Rz94PnPbzNDiHpb28GcTjFFpg8Pm8DozBejeYqKrTc8RusEJ8YicVnRmEjUHDK7r"

    def start_requests(self):
        """Kick off the crawl at the city index page."""
        yield scrapy.Request(url="https://www.lianjia.com/city/", callback=self.parse)

    def get_token(self):
        """Return a random 32-character token drawn from TOKEN_ALPHABET."""
        return ''.join(random.choices(self.TOKEN_ALPHABET, k=32))

    def _request_captcha_resource(self, response, dont_filter=False):
        """Handle one captcha redirect: parse the target, start the puzzle flow.

        Extracts ``location`` and ``ext`` from the redirect's Location header,
        mints a fresh token, and returns a Request for the captcha puzzle
        resource (or None if the header does not match the expected shape).
        """
        target = response.headers['Location'].decode('utf-8')
        match = re.search(r'location=(.*?)&ext=(.*)', target)
        if match is None:
            # Guard: .group() on a failed search would raise AttributeError.
            self.logger.error("Unexpected redirect target, no location/ext: %s", target)
            return None
        self.location = match.group(1)
        self.ext = match.group(2)
        self.token = self.get_token()
        return scrapy.Request(
            url=f"https://captcha.lianjia.com/captcha/resource?sceneId=sec-hip&token={self.token}",
            callback=self.parse_get_base64, dont_filter=dont_filter)

    def parse_get_base64(self, response):
        """Decode the captcha puzzle and submit the solution for pre-validation."""
        # The first 24 characters are a data-URI style prefix; keep the raw base64.
        self.base64 = response.json()['data']['puzzle']["resource"][24:]
        verification = parse_base64(self.base64)
        self.logger.debug("captcha verification: %s", verification)
        yield scrapy.Request(
            url=f'https://captcha.lianjia.com/captcha/pre-validate?sceneId=sec-hip&token={self.token}&challenger={verification}',
            callback=self.parse_pre_validate)

    def parse_pre_validate(self, response):
        """If pre-validation succeeded, POST the final validate call; else retry the puzzle."""
        data = {
            "token": self.token,
            "scene": "sec-hip",
            "location": self.location,
            "ext": self.ext
        }
        self.logger.debug(json.dumps(data))
        if response.json()['data']:
            self.logger.debug("%s", response.json()['data'])
            # NOTE(review): body is JSON but no Content-Type header is set —
            # confirm the endpoint accepts this.
            yield scrapy.Request(url="https://hip.lianjia.com/api/v1/validate", method='post', body=json.dumps(data),
                                 callback=self.parse_validate)
        else:
            self.logger.warning("验证码解析失败")
            # Puzzle solve failed: fetch a fresh puzzle (dont_filter so the
            # duplicate-URL filter does not drop the retry).
            yield self._request_captcha_resource_retry()

    def _request_captcha_resource_retry(self):
        """Re-request the captcha puzzle with the current token, bypassing dedup."""
        return scrapy.Request(
            url=f"https://captcha.lianjia.com/captcha/resource?sceneId=sec-hip&token={self.token}",
            callback=self.parse_get_base64, dont_filter=True)

    def parse_validate(self, response):
        """A 302 from /validate means success: retry the originally blocked URL."""
        self.logger.debug("%s", response.status)
        if response.status == 302:
            url = unquote(self.location)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Parse the city index, or enter the captcha flow on a 302 challenge."""
        if response.status == 302:
            self.logger.debug("%s", response.headers)
            request = self._request_captcha_resource(response)
            if request is not None:
                yield request
        else:
            self.logger.info("成功")
            soup = BeautifulSoup(response.text, "lxml")
            for city_tag in soup.select(".city_list li a"):
                city_url = city_tag['href']
                city_name = city_tag.text
                self.logger.debug("%s %s", city_name, city_url)
                yield scrapy.Request(url=city_url, callback=self.parse_city,
                                     meta={'city_name': city_name})
                # Only the first city is crawled — presumably a debug limit;
                # remove this break to crawl every city.
                break

    def parse_city(self, response):
        """Follow a city page into its second-hand-house listing, handling a 301 challenge."""
        self.logger.debug("%s", response.status)
        if response.status == 301:
            request = self._request_captcha_resource(response, dont_filter=True)
            if request is not None:
                yield request
        else:
            self.logger.info("%s访问成功 %s", response.meta['city_name'], response.url)
            city_detail_second_hand_house_url = response.url + 'ershoufang/rs/'
            self.logger.debug("%s", city_detail_second_hand_house_url)
            # The srcid cookie is a captured session artifact the listing page
            # requires; it is opaque and must be sent verbatim.
            yield scrapy.Request(url=city_detail_second_hand_house_url, callback=self.parse_second_hand_house_detail
                                 , cookies={
                    "srcid": "eyJ0Ijoie1wiZGF0YVwiOlwiMmQ3NGUxYmM5Zjg0YzgzYzhhYTgxM2M0NzYwMjU2NjRlOWU5NDIzNTFlYTNkMzU5NTk3OWNjZmUyMzMwYTU0NTk2ZTIzNDMxMTlkZjdiNGQwNGQ4ZWY3NDBhNjM4YjczODcyMjJjMzY4MDhkZDY0NzhjNjBiYmVlYmM0MjNhZTdhZmRjMWM2ZWVmZmE5Mzc3YTk4N2QzNjU1MzY4ZWQyNjYxZjI2MWZkMmU1YmJmZWMyOWIzYjEzODIyNWVkNTZjZWM4YWYxM2U5YjRkNDYwNGNlZmQ3NzBjOTUyZmVhYmFjYzFiY2VhMzE3Yzk3MzMxYzQ2NmY0ODA0YTRlODIwN1wiLFwia2V5X2lkXCI6XCIxXCIsXCJzaWduXCI6XCIzOGI2NzQzMlwifSIsInIiOiJodHRwczovL2FxLmxpYW5qaWEuY29tL2Vyc2hvdWZhbmcvcnMvIiwib3MiOiJ3ZWIiLCJ2IjoiMC4xIn0="})

    def parse_second_hand_house_detail(self, response):
        """Terminal callback: currently only logs any redirect off the listing page."""
        self.logger.debug("%s", response.status)
        if response.status == 302:
            redirect_url = response.headers['Location'].decode('utf-8')
            self.logger.debug("%s -> %s", response.url, redirect_url)
