"""
爬取评论
"""
import requests
from lxml import etree
from db_utils import UserReviewDB
import time


def getRoot(url):
    """Download *url* and return the parsed lxml HTML root element.

    :param url: page URL to fetch.
    :return: lxml element tree root built from the response body.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.80 Safari/537.36 Edg/86.0.622.43'}
    # Fix: the original call had no timeout, so one stalled connection could
    # hang the whole crawl forever.
    r = requests.get(url, headers=headers, timeout=30)
    r.encoding = 'utf-8'
    root = etree.HTML(r.text)
    return root


def parseComment(comment):
    """Extract a single user review from one ``<article>`` element.

    :param comment: lxml element for one review article.
    :return: dict of review fields; any field whose xpath matched nothing
        is ``None``.
    """

    def _first(xpath, strip=False):
        # Return the first text node matched by *xpath*, or None when absent.
        nodes = comment.xpath(xpath)
        if not nodes:
            return None
        return nodes[0].strip() if strip else nodes[0]

    return {
        'user_nickname': _first("./aside/header/div[2]/h2/a/text()"),
        'pickup_date': _first("./aside/section/div[2]/div[1]/div[1]/p[1]/text()"),
        'purchase_location': _first("./aside/section/div[2]/div[1]/div[2]/p[1]/text()"),
        'naked_car_price': _first("./aside/section/div[2]/div[1]/div[3]/p[1]/text()"),
        'fuel_consumption': _first("./aside/section/div[2]/div[1]/div[4]/p[1]/text()"),
        # strip=True: the model-name node carries surrounding layout whitespace.
        # Bug fix: the original called .strip() unconditionally on this field
        # and crashed with AttributeError whenever the xpath matched nothing.
        'car_model': _first("./section/header/h2/span[2]/text()", strip=True),
        'rating': _first("./section/header/a/span[1]/text()"),
        'review_time': _first("./section/div[3]/span/text()"),
        'review_content': _first("./section/p/text()"),
    }


def parsePageComment(root):
    """Collect every review on one page.

    :param root: lxml root element of a review-list page.
    :return: list of review dicts, one per ``<article>`` element found.
    """
    articles = root.xpath('//*[@id="__next"]/div/div[2]/div[3]/section/section[1]/article')
    return [parseComment(article) for article in articles]

def query_max_page(url):
    """Read the pagination bar and return the highest page number.

    :param url: URL of a review-list page (normally page 1).
    :return: text of the largest page-number link (a numeric string).
    """
    root = getRoot(url)
    pager_items = root.xpath(
        '//*[@id="__next"]/div[1]/div[2]/div[3]/section/section[1]/div[2]/ul/li')
    # The final <li> is the "next page" arrow; the one before it carries the
    # highest page number.
    last_number_item = pager_items[-2]
    return last_number_item.xpath('./a/span/text()')[0]


if __name__ == '__main__':
    # Crawl every review page for one car series and persist each review.
    base_url = 'https://www.dongchedi.com/auto/series/score/{carid}-x-S0-x-x-1-{page}'
    carid = 305
    url = base_url.format(carid=carid, page=1)
    print(url)
    max_page = query_max_page(url)
    print(f'当前carid：{carid}, 最大页数：{max_page}')
    db = UserReviewDB()
    for i in range(1, int(max_page) + 1):
        url = base_url.format(carid=carid, page=i)
        print(f'开始访问页面：{url}', )
        root = getRoot(url)
        comment_data = parsePageComment(root)
        if comment_data:
            for data in comment_data:
                # Tag each review with the series id before persisting.
                data['carid'] = carid
                db.insert_review(data)
        # Pause 10 seconds between pages to avoid hammering the server.
        # (Fix: the original comment claimed a 5-second interval while the
        # code sleeps 10 — the code's value is kept as authoritative.)
        time.sleep(10)