import json
from proxy_helper import ProxyHelper
import requests
from lxml import etree
import time


class NewPianChangSpider:
    """Crawl video listings from xinpianchang.com.

    Discovers every category from the site's front-page JSON payload, then
    walks each category's paginated "pick" listing, printing one line per
    video card: running counter, title, play count, hot count, duration,
    and author.
    """

    # Hard upper bound on listing pages requested per category.
    MAX_PAGES = 100
    # How many consecutive empty responses to retry before assuming the
    # category is exhausted. The original code retried forever (infinite loop).
    MAX_EMPTY_RETRIES = 3

    def __init__(self):
        self.counter = 0            # running total of video cards printed
        self.page = 0               # kept for backward compatibility (unused by run)
        self.proxy_helper = ProxyHelper()

    def run(self):
        """Entry point: fetch the category index, then crawl each category."""
        url = "https://www.xinpianchang.com/_next/data/XOJMH-XpfII0gvuh2g4Dq/index.json"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Cookie": "Device_ID=3i8fyc48lm216lhdg; MEIQIA_TRACK_ID=2nBBjy6vGLmtVbTq8KcQfAmHVUA; MEIQIA_VISIT_ID=2nBBjvM2aFubeWrmhoYqna0Gypn; Authorization=4EC46BA2681DB6A66681DB44C5681DBACB6681DB65EE9AFC6F64; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2215041561%22%2C%22first_id%22%3A%221926edb6166a-07d127846f2749-26001051-2073600-1926edb6167938%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_utm_source%22%3A%22xpcWeb%22%2C%22%24latest_utm_medium%22%3A%22navigatorCate%22%2C%22%24latest_utm_campaign%22%3A%22scjp%22%7D%2C%22%24device_id%22%3A%221926edb6166a-07d127846f2749-26001051-2073600-1926edb6167938%22%7D; _ga=GA1.2.1447999405.1729768816; _gid=GA1.2.1844458940.1729768816; Hm_lvt_446567e1546b322b726d54ed9b5ad346=1729682423,1729841524; HMACCOUNT=AFD53293F7A60F46; Hm_lpvt_446567e1546b322b726d54ed9b5ad346=1729841631"
        }
        category_detail = self._fetch_categories(url, headers)
        print(category_detail)
        for category in category_detail['pageProps']['squareData']['navigation']['categories']:
            print(category['name'], category['link'])
            self._crawl_category(category, headers)

    def _fetch_categories(self, url, headers):
        """Download the front-page JSON (via proxy) and return it as a dict."""
        response = self.proxy_helper.requests_with_proxy(url=url, headers=headers)
        return json.loads(response.content.decode())

    def _crawl_category(self, category, headers):
        """Paginate one category's listing, printing every video card found.

        Empty responses are retried with a 1-second back-off, but only
        MAX_EMPTY_RETRIES times in a row — the original `page -= 1; continue`
        with no bound spun forever once a category ran out of pages.
        """
        page = 0
        empty_retries = 0
        while page < self.MAX_PAGES:
            page += 1
            if page == 1:
                url = f"{category['link']}-all-all-0-0-pick"
                print('---------')
            else:
                url = f"{category['link']}-all-all-0-0-pick-pp{page}"
                print('+++++++++')
            response = requests.get(url, headers=headers)
            root = etree.HTML(response.content.decode())
            div_list = root.xpath("//div[@class='sc-7a811143-0 eVXfIM']")
            print(len(div_list))
            if not div_list:
                print("没有数据")
                time.sleep(1)
                empty_retries += 1
                if empty_retries >= self.MAX_EMPTY_RETRIES:
                    # Assume the category is exhausted rather than looping forever.
                    break
                page -= 1
                continue
            empty_retries = 0
            for div in div_list:
                self._print_video(div)

    def _print_video(self, div):
        """Extract and print one video card's fields; bumps self.counter."""
        self.counter += 1
        title = div.xpath(".//h2/text()")[0]
        play_hot = div.xpath(".//ul[@class='sc-8f6caae3-0 gzlhEC']/li/span/text()")
        play = play_hot[1]
        hot = play_hot[3]
        duration = div.xpath(".//div[@class='text-xs']/text()")[0]
        # Fuzzy match: the generated Tailwind-style class list varies, so
        # select by substring instead of the exact class attribute.
        author = div.xpath(".//div[contains(@class,'text-gray-800')]/text()")[0]
        print(self.counter, title, play, hot, duration, author)


if __name__ == '__main__':
    # Script entry point: build the spider and start the crawl.
    spider = NewPianChangSpider()
    spider.run()
