# 用 Beautiful Soup 爬取网页内容, 并将爬取的内容存入 MongoDB 数据库中
# 由于 akd 中的数据量不够, 所以选择其它网站进行爬取
import requests
from bs4 import BeautifulSoup
import pymongo


def fetch_page_content(page_number, timeout=10):
    """Fetch the HTML of one listing page from akd.cn.

    Args:
        page_number: 1-based index of the listing page to request.
        timeout: seconds to wait for the response; without it
            ``requests.get`` can block forever on a stalled connection.

    Returns:
        The response body as text, or None when the request fails or
        the server answers with a non-200 status.
    """
    url = f"https://www.akd.cn/clist/ar0pg{page_number}"
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException as exc:
        # Network errors previously propagated and killed the crawl;
        # treat them like a bad status so the caller can decide.
        print(f"Failed to retrieve page {page_number}: {exc}")
        return None
    if response.status_code == 200:
        return response.text
    print(f"Failed to retrieve page {page_number}")
    return None

def parse_page_content(html_content):
    """Extract the car detail-page links from one listing page.

    Args:
        html_content: raw HTML text of a listing page.

    Returns:
        A list of href strings, one per matching anchor under the
        '#carlist' container.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    # Each car card is an <a> nested two <div>s deep inside #carlist.
    anchors = soup.select('#carlist > div > div > a')
    return [anchor['href'] for anchor in anchors]

def get_photo(photo_url, timeout=10):
    """Download a car photo.

    Args:
        photo_url: absolute URL of the image.
        timeout: seconds to wait for the response (consistent with
            fetch_page_content; prevents an indefinite hang).

    Returns:
        The raw image bytes, or None when the request fails or the
        server answers with a non-200 status.
    """
    try:
        response = requests.get(photo_url, timeout=timeout)
    except requests.RequestException as exc:
        # A single broken image should not abort the whole crawl.
        print(f"Failed to retrieve photo {photo_url}: {exc}")
        return None
    if response.status_code == 200:
        return response.content
    print(f"Failed to retrieve photo {photo_url}")
    return None

def parse_car_detail(links):
    """Scrape each car detail page and insert one document per car into MongoDB.

    Args:
        links: iterable of URLs of car detail pages.

    Side effects:
        Inserts documents into ``car_database.car_collection`` and prints
        a progress line per inserted car. Pages that fail to load are
        skipped.
    """
    # Common prefix of the detail-field selectors.
    main_css = '#akd_content > div.Car_Main.W1200 > '

    def _text(soup, css):
        # Cleaned text of the first match, or '' when the selector finds
        # nothing (avoids AttributeError on a None select_one result).
        element = soup.select_one(css)
        return element.text.replace('\n', '').strip() if element else ''

    # Context manager ensures the connection is closed (the original
    # leaked the MongoClient).
    with pymongo.MongoClient("mongodb://120.53.220.118:27000/") as client:
        collection = client["car_database"]["car_collection"]

        for link in links:  # 处理所有链接
            response = requests.get(link, timeout=10)
            if response.status_code != 200:
                continue
            soup = BeautifulSoup(response.text, 'html.parser')

            # 获取车辆详细信息
            brand = _text(soup, main_css + 'div:nth-child(1) > p:nth-child(1) > span.row_Context')
            year = _text(soup, main_css + 'div:nth-child(2) > p:nth-child(1) > span.row_Context')
            mileage = _text(soup, '#akd_content > div.content_head > div > div.Car_Price_Right > ul > li:nth-child(2) > p:nth-child(1)')
            color = _text(soup, main_css + 'div:nth-child(4) > p:nth-child(1) > span.row_Context')
            price = _text(soup, 'p.CarPrice > span') + " 万"

            # 获取车辆配置信息 (energy type, drive type, env. standard,
            # interior color, seat count -- joined into one string)
            configuration = _text(soup, main_css + 'div:nth-child(2) > p:nth-child(2) > span.row_Context')
            configuration += ', ' + _text(soup, main_css + 'div:nth-child(3) > p:nth-child(1) > span.row_Context')
            env_standard = soup.select_one(main_css + 'div:nth-child(3) > p:nth-child(2) > span.row_Context')
            if env_standard:
                # Keep only the leading text node (drops nested markup).
                configuration += ', ' + env_standard.contents[0].replace('\n', '').strip()
            configuration += ', ' + '内饰' + _text(soup, main_css + 'div:nth-child(4) > p:nth-child(2) > span.row_Context')
            # BUG FIX: the original chained ``.text()`` onto a str here and
            # below, raising AttributeError on every page; ``.text`` (via
            # _text) is the correct BeautifulSoup attribute.
            configuration += ', ' + _text(soup, main_css + 'div:nth-child(5) > p:nth-child(1) > span.row_Context') + '座'
            condition_description = _text(soup, main_css + 'div:nth-child(6) > p > span.row_Context')

            # BUG FIX: subscripting a missing <img> (None) crashed; guard it.
            photo_element = soup.select_one('#akd_content > div.content_head > div > div.Car_Price_left > a > img')
            photo = get_photo(photo_element['src']) if photo_element else None

            car_data = {
                "Brand": brand,
                "Year": year,
                "Mileage": mileage,
                "Color": color,
                "Price": price,
                "Configuration": configuration,
                "ConditionDescription": condition_description,
                "Photo": photo
            }

            collection.insert_one(car_data)
            print(f"Inserted car data for Brand: {brand}, Year: {year}")

# 字段名	数据类型	允许空值	是否为主键	字段说明
# VehicleID	bigint(20)	否	是	车辆唯一标识符   √
# Brand	    varchar(200)	否	否	车辆品牌    √
# Model	    varchar(200)	否	否	车辆型号    ×
# Year	    varchar(200)	否	否	生产年份    √
# Mileage	varchar(200)	否	否	行驶里程    √
# Color	    varchar(200)	否	否	车辆颜色    √
# Price	    varchar(200)	否	否	车辆价格    √
# Configuration	varchar(200)	否	否	车辆配置(能源类型,驱动形式,环保标准,内饰颜色,座位数)   √ 
# ConditionDescription	varchar(200)	否	否	车况描述   √
# Photo	varchar(200)	否	否	车辆照片    √
# RepairHistory	varchar(200)	否	否	维修历史    ×

def main():
    """Crawl listing pages until roughly 2000 cars have been processed.

    Walks pages sequentially; stops early when a page cannot be fetched
    or yields no links, instead of looping forever.
    """
    total_cars = 0
    page_number = 1
    while total_cars < 2000:
        html_content = fetch_page_content(page_number)
        if html_content is None:
            # BUG FIX: the original kept incrementing page_number forever
            # after the site stopped responding; stop instead.
            break
        # 获取当前页面中每个车辆的详细链接
        links = parse_page_content(html_content)
        if not links:
            # An empty page means we've run past the last listing; the
            # original would spin indefinitely since total_cars never grew.
            break
        # 处理每个车辆的详细链接
        parse_car_detail(links)
        total_cars += len(links)
        print(f"Total cars processed: {total_cars}")
        page_number += 1

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()

