import os
import requests
from pathlib import Path
from bs4 import BeautifulSoup
from koubei.utils import Brand, Manufactor, Series
from koubei.process import pipeline, search, get_series
from koubei.exception import CountPageError

## One-shot example use; the start page is hard coded.
## This is only an example. The better approach would be to walk the
## brand pages A~Z, persist every brand / manufactor / series to a
## database, and drive the crawl from database queries — but that adds
## database complexity, so this stays a simple example.
start_uri = 'http://www.autohome.com.cn/grade/carhtml/D.html'
# Bound the request so a dead server cannot hang the script forever,
# and fail loudly on HTTP errors instead of silently parsing an error
# page as if it were the brand listing.
response = requests.get(start_uri, timeout=30)
response.raise_for_status()
html = BeautifulSoup(response.content, "html.parser")
# Each <dl> element on the grade page describes one brand.
brands = [Brand(b) for b in html.find_all('dl')]
brand_name = "大众"

# Look up the target brand by name.
target = search(brand_name, brands)

## Collect every series of the target brand, grouped by manufactor.
## In "bms", the b, m, s stand for brand, manufactor, series.
bms_list = []
for raw_manufactor in target.manufactor_list:
    manufactor = Manufactor(*raw_manufactor)
    # Keep only series that carry a usable id.
    series_entries = [
        {
            'series_id': parsed.series_id,
            'series_name': parsed.series_name,
            'series_price': parsed.series_price,
        }
        for parsed in (Series(raw) for raw in manufactor.series_list)
        if parsed.series_id
    ]
    bms_list.append({
        'brand_id': target.brand_id,
        'brand_name': target.brand_name,
        'manufactor_name': manufactor.manufactor_name,
        'series': series_entries,
    })

series_list = get_series(bms_list)
print(series_list)
## The code above gathers all series information for the brand.

## Example code; the crawled JSON data is not post-processed here —
## it is simply fetched and saved to disk.
## Saved path: db_root/brand_name/<series_id>.json
## Series whose file already exists are skipped, as are series that
## raise CountPageError.
db_root = Path('db')
target_path = db_root / brand_name
# Files are saved as "<series_id>.json", so compare against the file
# stem. The previous os.listdir() comparison kept the ".json" suffix,
# which never matched a bare series id and made the already-done
# check a no-op (everything was re-crawled every run).
# NOTE(review): assumes entries of series_list are strings matching
# the saved file stems — confirm against get_series().
if target_path.exists():
    done = {p.stem for p in target_path.iterdir()}
else:
    done = set()
undone = [ssid for ssid in series_list if ssid not in done]
for ssid in undone:
    try:
        pipeline(ssid, target_path)
    except CountPageError:
        # Page counting failed for this series; skip it and move on.
        print("Error occur in counting process, {} skipped.".format(ssid))
        continue
    # NOTE(review): any other exception currently aborts the whole run;
    # re-add a broad catch-and-log here if best-effort crawling of the
    # remaining series is preferred.