import pprint
import json
from tkinter.constants import CURRENT

import requests
import time
from bs4 import BeautifulSoup
import selenium
import pandas as pd
import logging
from lxml import etree

# Month codes (YYYYMM) of the sales pages to scrape, newest first.
urls = [f"2024{month:02d}" for month in range(11, 5, -1)]
def get_html_from_url(urls):
    """Fetch the Dongchedi monthly sales page for each month code.

    Args:
        urls: iterable of "YYYYMM" month strings used to build each page URL.

    Returns:
        list[str]: raw HTML text of each fetched page, in input order.
        Pages that answer with a non-200 status are still appended (their
        body may contain an error page); the failure is logged.
    """
    # Headers are identical for every request — build them once, outside
    # the loop, instead of rebuilding the dict per iteration.
    # BUG FIX: the original Cookie literal was split across two physical
    # lines without continuation (a SyntaxError); the bytes are preserved
    # here via implicit adjacent-string concatenation.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
        'Cookie': ('ttwid=1%7Czqjo7yuaFWyUSM0mBbMU2nVndA60ttyhUEqHGzj4pKE%7C1733880505%7C102883770f9b2db871bbaf46870c825643dcafeebc29d3d90404df4c84cf699d; tt_webid=7446959864185423385; tt_web_version=new; is_dev=false; is_boe=false; x-web-secsdk-uid=746bf3cb-087b-4658-a377-902289a9aabe; Hm_lvt_3e79ab9e4da287b5752d8048743b95e6=1733880511; HMACCOUNT=C94BBD1726FAC944; city_name=%E5%8C%97%E4%BA%AC; s_v_web_id=verify_m4j7ppws_JjGtC4jM_dj9T_4mPe_9a9y_YnAATc55HP3T; passport_csrf_token=c191478ccc691d5c653cdbad3e1cf8ca; passport_csrf_token_default=c191478ccc691d5c653cdbad3e1cf8ca; passport_auth_status=24ae71121dc748c29dd0be4a76c69583%2C; passport_auth_status_ss=24ae71121dc748c29dd0be4a76c69583%2C; sid_guard=062629642d1dfdd2a4e2a2d137036c2d%7C1733881316%7C5184000%7CSun%2C+09-Feb-2025+01%3A41%3A56+GMT; uid_tt=7cdf20aa14813437b651d805cf2fb719; uid_tt_ss=7cdf20aa14813437b651d805cf2fb719; sid_tt=062629642d1dfdd2a4e2a2d137036c2d; sessionid=062629642d1dfdd2a4e2a2d137036c2d; sessionid_ss=062629642d1dfdd2a4e2a2d137036c2d; is_staff_user=false; sid_ucp_v1=1.0.0-KDUwOTFlNjFkMzhlMWFiZDkxN2Q0YTU3YjRmY2JiZWIwNjgwZjliMDcKGQieqLC6-fSNAhDk2-O6BhivDiAMOAJA8QcaAmxmIiAwNjI2Mjk2NDJkMWRmZGQyYTRlMmEyZDEzNzAzNmMyZA; ssid_ucp_v1=1.0.0-KDUwOTFlNjFkMzhlMWFiZDkxN2Q0YTU3YjRmY2JiZWIwNjgwZjliMDcKGQieqLC6-fSNAhDk2-O6BhivDiAMOAJA8QcaAmxmIiAwNjI2Mjk2NDJkMWRmZGQyYTRlMmEyZDEzNzAzNmMyZA; user_data=%7B%22gender%22%3A0%2C%22name%22%3A%22%E5%92%82%E6%91%B8%E5%92%82%E6%91%B8%22%2C%22screen_name%22%3A%22%E5%92%82%E6%91%B8%E5%92%82%E6%91%B8%22%2C%22user_id%22%3A1187092844254238%2C%22avatar_url%22%3A%22https%3A%2F%2Fp9-passport.byteacctimg.com%2Fimg%2Fuser-avatar%2Ff72a2bbf0815e08e23b20d93ff521349~300x300.image%22%2C%22mobile%22%3A%22180******10%22%7D; _gid=GA1.2.269695857.1733881330; Hm_lpvt_3e79ab9e4da287b5752d8048743b95e6=1733881944; _ga_YB3EWSDTGF=GS1.1.1733881344.1.1.1733881953.49.0.0; _ga=GA1.1.1162618048.1733881330; '
                   'odin_tt=0229ab21e70de36c29fdfe7a6b8e3ff658bad19b8302e300801f074d559f9512ff4e519de03943abad247cab4bf096d6'),
    }

    htmls = []
    for u in urls:
        url = f"https://www.dongchedi.com/sales/sale-x-{u}-x-x-x-x"
        print("李星烨+周天泽+慈龙生+张治国+高子欣(爬虫小组期末作业无恶意求放过):"+url)

        # timeout so a stalled server cannot hang the scrape forever
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code != 200:
            # BUG FIX: logging.error("msg", arg) requires a %s placeholder
            # in the message; the original call produced a logging-internal
            # "not all arguments converted" formatting error.
            logging.error("请求失败!!! status=%s", response.status_code)

        htmls.append(response.text)
        time.sleep(3)  # be polite: throttle requests between pages
    return htmls


def parse_single_html(html):
    """Parse one sales-ranking page and return the car names found on it.

    Args:
        html: raw HTML text of a Dongchedi monthly sales page.

    Returns:
        list[str]: car names in page order. Empty if the expected list
        container is absent (layout change or blocked/failed request).
    """
    soup = BeautifulSoup(html, 'html.parser')
    container = soup.find("div", class_="jsx-495096444 tw-col-span-5")
    if container is None:
        # BUG FIX: the original dereferenced find()'s result directly and
        # crashed with AttributeError whenever the container was missing.
        logging.error("sales list container not found in page")
        return []

    datas = []
    for car in container.find_all("li", class_="list_item__3gOKl"):
        name_tag = car.find("a", class_="tw-font-semibold")
        if name_tag is None:
            continue  # skip malformed list items rather than crashing
        car_name = name_tag.get_text()
        print(car_name, end=" ")
        # BUG FIX: the original built `datas` but never appended to it or
        # returned it, so parsing produced nothing a caller could use.
        datas.append(car_name)
    return datas

# Fetch every monthly sales page, then print the car names from the most
# recent month's page.
pages = get_html_from_url(urls)
parse_single_html(pages[0])