from kgcar.spider.utils import PageRenderer
from lxml import etree
import re
import os
import csv



def get_series(base_url):
    """Scrape every car series for each brand listed in ../data/brand.csv.

    For each brand row, renders the brand's Autohome price page (e.g.
    https://car.autohome.com.cn/price/brand-169.html), extracts each series'
    href, id, name and model count via XPath, and writes one CSV row per
    series to ../data/series.csv.

    Args:
        base_url: Site root, e.g. "https://car.autohome.com.cn"; the brand
            href read from brand.csv is appended to it.

    Side effects:
        Overwrites ../data/series.csv (relative to the current working
        directory) with a header row plus one row per scraped series.
    """
    data_dir = os.path.join(os.getcwd(), "..", "data")
    brand_path = os.path.join(data_dir, "brand.csv")
    series_path = os.path.join(data_dir, "series.csv")
    renderer = PageRenderer()  # Chrome driver wrapper used to render pages

    with open(series_path, 'wt', newline='', encoding='utf-8') as series_csv:
        header = ['brand_id', 'brand_href', 'brand_name', 'series_href',
                  'series_id', 'series_name', 'series_count']
        writer = csv.writer(series_csv)
        writer.writerow(header)

        # Read every brand URL (and id) recorded by the brand crawl step.
        with open(brand_path, 'r', encoding='utf-8') as brand_csv:
            reader = csv.reader(brand_csv)
            next(reader)  # skip the header row
            for row in reader:
                brand_id = row[0]
                href = row[3]
                # Render the brand's price page and parse it.
                response = renderer.render_page(base_url + href)
                html = etree.HTML(response)
                # The brand's section is anchored by its id,
                # e.g. //*[@id="b117"]/h3/a/text()
                brand_nodes = html.xpath('//*[@id="{}"]'.format(brand_id))
                for brand_node in brand_nodes:
                    brand_href = brand_node.xpath('./h3/a/@href')[0]
                    brand_name = brand_node.xpath(
                        'normalize-space(./h3/a/text())')
                    print(brand_name)

                    series_nodes = html.xpath(
                        '//*[@id="{}"]/dl/dd'.format(brand_id))
                    for series_node in series_nodes:
                        series_href = series_node.xpath('./a/@href')[0]
                        series_id = series_node.xpath('./a/@id')[0]
                        series_name = series_node.xpath('./a/text()')[0]
                        count_text = series_node.xpath('./a/em/text()')[0]
                        # The <em> text embeds the model count; keep digits
                        # only (raw string avoids the invalid "\d" escape).
                        count = re.findall(r"\d+", count_text)[0]

                        writer.writerow([brand_id, brand_href, brand_name,
                                         series_href, series_id, series_name,
                                         count])


if __name__ == '__main__':
    # Script entry point: crawl all series pages rooted at the Autohome site.
    site_root = "https://car.autohome.com.cn"
    get_series(site_root)
    print("处理结束")
