# dcd 网站虽然有反爬机制，但是通过分析网页源代码，发现车辆的价格、行驶里程等信息是通过字体文件加密的
# 所以需要通过下载字体文件，解析字体文件，将加密的信息解密，然后再解析车辆的详细信息
import requests
from bs4 import BeautifulSoup
from lxml import html
import pymongo
import random
from fontTools.ttLib import TTFont
# from PIL import Image
# from io import BytesIO

# Pool of desktop Chrome User-Agent headers; one is picked at random for each
# request so the crawler's traffic looks less uniform to the server.
headers_list = [
    {'User-Agent': ua}
    for ua in (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
    )
]

def fetch_page_content(page_number):
    """Download the HTML of one listing page of the used-car search results.

    Args:
        page_number: 1-based index of the listing page to fetch.

    Returns:
        The response body as text on HTTP 200, otherwise ``None``.
    """
    url = f"https://www.dongchedi.com/usedcar/x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-1-{page_number}-x-x-x-x-x"
    try:
        # A timeout stops the crawler from hanging forever on a stalled
        # connection; a network failure is reported, not raised, so the
        # caller's "skip this page" path still works.
        response = requests.get(url, headers=random.choice(headers_list), timeout=10)
    except requests.RequestException as exc:
        print(f"Failed to retrieve page {page_number}: {exc}")
        return None
    if response.status_code == 200:
        return response.text
    print(f"Failed to retrieve page {page_number}")
    return None

def parse_page_content(html_content):
    """Extract the detail-page links of every car shown on a results page.

    Args:
        html_content: raw HTML of one listing page.

    Returns:
        A list of relative ``href`` strings, one per listing anchor.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    links = []
    # A listing page shows at most 60 cars; walk the fixed container layout.
    for i in range(1, 61):
        selected_elements = soup.select(f'#__next > div > div.new-main.new > div > div > div.jsx-2898915557.wrap > ul > li:nth-child({i}) > a')
        for element in selected_elements:
            # .get() avoids a KeyError on anchors that carry no href attribute.
            href = element.get('href')
            if href:
                links.append(href)
    return links

def get_photo(photo_url):
    """Download one car photo.

    Args:
        photo_url: absolute URL of the image.

    Returns:
        The raw image bytes on HTTP 200, otherwise ``None``.
    """
    try:
        # Timeout + caught exception: a single broken image URL must not
        # bring down the whole crawl.
        response = requests.get(photo_url, timeout=10)
    except requests.RequestException as exc:
        print(f"Failed to retrieve photo {photo_url}: {exc}")
        return None
    if response.status_code == 200:
        return response.content
    print(f"Failed to retrieve photo {photo_url}")
    return None

def download_font(url, save_path):
    """Download the obfuscation web font and write it to disk.

    Args:
        url: URL of the woff/woff2 font file.
        save_path: local path the raw bytes are written to.

    Raises:
        requests.HTTPError: if the server answers with an error status.
        requests.RequestException: on network failure or timeout.
    """
    response = requests.get(url, timeout=10)
    # Without this check a 404/500 error page would be saved to disk and
    # later fail to parse as a font, far from the real cause.
    response.raise_for_status()
    with open(save_path, 'wb') as f:
        f.write(response.content)

def parse_font(font_path):
    """Load a TrueType/OpenType font and return its best character map.

    The returned dict maps Unicode code points to glyph names and is the
    first stage of undoing the site's font-based obfuscation.
    """
    return TTFont(font_path).getBestCmap()

def replace_text(text, cmap):
    """Substitute characters of *text* through the *cmap* table.

    *cmap* maps Unicode code points (ints) to replacement strings, exactly
    the shape ``str.translate`` accepts; characters whose code point is
    absent from the table pass through unchanged.
    """
    return text.translate(cmap)

def parse_car_detail(links, mongo_uri, mongo_db, mongo_collection):
    """Scrape every car detail page in *links* and insert one MongoDB document per car.

    The site obfuscates mileage/price digits with a custom web font, so the
    font is downloaded first and its cmap plus a glyph-name table are used
    to decode those fields before storage.

    Args:
        links: relative detail-page hrefs (as returned by parse_page_content).
        mongo_uri: MongoDB connection string.
        mongo_db: target database name.
        mongo_collection: target collection name.
    """
    client = pymongo.MongoClient(mongo_uri)
    try:
        db = client[mongo_db]
        collection = db[mongo_collection]

        # NOTE(review): the font URL looks deployment-specific — confirm it is
        # still current before each crawl.
        font_url = 'https://lf6-awef.bytetos.com/obj/awesome-font/c/6ba9901b0b043f2.woff2'
        font_path = './font.woff'
        download_font(font_url, font_path)

        cmap = parse_font(font_path)

        # Glyph-name -> real character table for the obfuscation font.
        glyph_dict = {
            'gid58451': '0',
            'gid58685': '1',
            'gid58352': '2',
            'gid58402': '3',
            'gid58412': '4',
            'gid58524': '5',
            'gid58411': '6',
            'gid58622': '7',
            'gid58696': '8',
            'gid58568': '9',
            'gid58463': '万',
            'gid58673': '公',
            'gid58620': '里'
        }

        def decode(value):
            # Stage 1: code points -> glyph names; stage 2: glyph names -> text.
            decoded = replace_text(value, cmap)
            for glyph_name, char in glyph_dict.items():
                decoded = decoded.replace(glyph_name, char)
            return decoded

        for link in links:
            full_link = "https://www.dongchedi.com" + link  # links are site-relative
            try:
                response = requests.get(full_link, headers=random.choice(headers_list), timeout=10)
                if response.status_code != 200:
                    print(f"Failed to retrieve detail page {full_link}")
                    continue
                response.encoding = response.apparent_encoding
                html_content = response.content
                tree = html.fromstring(html_content)

                # Absolute XPaths mirror the page layout at crawl time; a site
                # redesign makes them miss and raise IndexError (handled below).
                brand = tree.xpath('/html/body/div[1]/div/div[2]/div/div[2]/div[2]/div[1]/h1/span/text()')[0].strip()
                year = tree.xpath('/html/body/div[1]/div/div[2]/div/div[4]/div[2]/div[1]/div/div[4]/p[1]/text()')[0].strip()
                mileage = tree.xpath('/html/body/div[1]/div/div[2]/div/div[2]/div[2]/div[5]/div/div[2]/p[1]/text()')[0]
                new_mileage = decode(mileage)
                price = tree.xpath('/html/body/div[1]/div/div[2]/div/div[2]/div[2]/div[3]/div/p/text()')[0]
                new_price = decode(price)

                # Configuration = gearbox type + engine displacement + car origin.
                configuration = '变速箱: ' + tree.xpath('/html/body/div[1]/div/div[2]/div/div[4]/div[2]/div[1]/div/div[6]/p[1]/text()')[0]
                configuration += ', 排量: ' + tree.xpath('/html/body/div[1]/div/div[2]/div/div[4]/div[2]/div[1]/div/div[5]/p[1]/text()')[0]
                configuration += ', 车源地: ' + tree.xpath('/html/body/div[1]/div/div[2]/div/div[4]/div[2]/div[1]/div/div[2]/p[1]/text()')[0]

                condition_description = tree.xpath('/html/body/div[1]/div/div[2]/div/div[4]/div[2]/div[2]/p/text()')[0]

                soup = BeautifulSoup(html_content, 'html.parser')
                photo_url = soup.select_one('#\\34  > ul > li:nth-child(1) > div > img')['src']
                if not photo_url.startswith('https:'):
                    photo_url = 'https:' + photo_url
                photo = get_photo(photo_url)

                car_data = {
                    "Brand": brand,
                    "Year": year,
                    "Mileage": new_mileage,
                    "Price": new_price,
                    "Configuration": configuration,
                    "ConditionDescription": condition_description,
                    "Photo": photo
                }

                collection.insert_one(car_data)
                print(f"Inserted car data for Brand: {brand}, Year: {year}")
            except (requests.RequestException, IndexError, TypeError) as exc:
                # One changed or missing page must not abort the whole crawl.
                print(f"Skipping {full_link}: {exc}")
    finally:
        # Release connection-pool resources even if the crawl fails part-way.
        client.close()

# 字段名	数据类型	允许空值	是否为主键	字段说明
# VehicleID	bigint(20)	否	是	车辆唯一标识符   √
# Brand	    varchar(200)	否	否	车辆品牌    √
# Model	    varchar(200)	否	否	车辆型号    ×
# Year	    varchar(200)	否	否	生产年份    √
# Mileage	varchar(200)	否	否	行驶里程    √
# Color	    varchar(200)	否	否	车辆颜色    ×
# Price	    varchar(200)	否	否	车辆价格    √
# Configuration	varchar(200)	否	否	车辆配置(变速箱类型, 排量, 车源地)   √ 
# ConditionDescription	varchar(200)	否	否	车况描述   √
# Photo	varchar(200)	否	否	车辆照片    √
# RepairHistory	varchar(200)	否	否	维修历史    ×

def main():
    """Crawl listing pages until roughly 2000 cars have been processed.

    Pages are fetched sequentially; each page's detail links are scraped and
    stored in MongoDB. Stops early after several consecutive empty/failed
    pages so an exhausted result set cannot spin the loop forever.
    """
    mongo_uri = "mongodb://120.53.220.118:27000/"
    mongo_db = "car_database"
    mongo_collection = "car_collection"

    total_cars = 0
    page_number = 1
    empty_pages = 0  # consecutive pages that yielded no cars
    while total_cars < 2000:
        html_content = fetch_page_content(page_number)
        if html_content:
            # Collect the detail link of every car on the current page.
            links = parse_page_content(html_content)
            # Scrape and store each car's detail page.
            parse_car_detail(links, mongo_uri, mongo_db, mongo_collection)
            total_cars += len(links)
            print(f"Total cars processed: {total_cars}")
            empty_pages = 0 if links else empty_pages + 1
        else:
            empty_pages += 1
        # The original loop never terminated when the site ran out of results
        # or kept failing; bail out after three fruitless pages in a row.
        if empty_pages >= 3:
            print("No more results; stopping.")
            break
        page_number += 1

if __name__ == "__main__":
    main()

