
# -*- coding:utf-8 -*-
# @Author:CL 1037654919@qq.com
# @Time:2022.04.01

import time
import json
import pandas as pd
import pymysql
import re
import csv

from bs4 import BeautifulSoup
import requests
import time
def get_data():
    """Scrape article pages from the Autohome 'baike' (wiki) section.

    Walks the wiki navigation on the index page, follows every category
    link, then every article link inside each category. For each article
    page yields one wrapped row ``[[title1, title2, title3, html_string]]``
    (a list containing a single 4-element list — kept for backward
    compatibility with existing consumers). When an individual article
    fails to download/parse, yields ``[[title1, title2, None, None]]``
    instead.

    Yields:
        list[list]: one wrapped 4-element row per article page visited.
    """
    start_url = 'https://car.autohome.com.cn/baike/index.html'
    resource = 'https://car.autohome.com.cn/baike/index.html'

    # Categories whose article pages use the 'article-details' layout
    # instead of the default 'wiki-article' layout.
    # Hoisted out of the loops: this is a constant, but the original
    # rebuilt the list on every <li> iteration.
    title2_fial_list = ['史上今天', '经典名车', '车系历史', '汽车人物',
                        '品牌介绍', '技术体验', '拆解分析', '技术讲堂',
                        '用品体验', '养车成本', '事故分析', '汽车赛事']

    # timeout= added so a dead/slow server cannot hang the scraper forever.
    html = requests.get(start_url, timeout=30)
    soup = BeautifulSoup(html.text, 'lxml')
    datas = soup.find_all('div', class_="wiki-nav_item")

    for data in datas:
        time.sleep(10)  # polite crawl delay between top-level categories
        try:
            lis = data.find_all('li')
            title1 = data.find('a', class_='wiki-nav_item-title').text
            for li in lis:
                a = li.find('a', href=True)
                href = 'https://car.autohome.com.cn' + a['href']
                title2 = a.text
                print(title2, href)
                headers = {'Referer': resource}
                try:
                    html1 = requests.get(href, headers=headers, timeout=30)
                    soup4 = BeautifulSoup(html1.text, 'lxml')
                    detaillist_items = soup4.find_all('div', class_='detaillist_item')
                    for detaillist_item in detaillist_items:
                        lists = []
                        hrefa = 'https://car.autohome.com.cn' + detaillist_item.find('a', href=True)['href']
                        print(hrefa)
                        headers = {'Referer': href}
                        try:
                            html2 = requests.get(hrefa, headers=headers, timeout=30)
                            soup5 = BeautifulSoup(html2.text, 'lxml')

                            if title2 in title2_fial_list:
                                datas5 = soup5.find_all('div', class_='article-details')[0]
                                title3 = datas5.find('h1').text
                            else:
                                datas5 = soup5.find_all('div', class_='wiki-article')[0]
                                title3 = datas5.find('div', class_='wiki-article-title-name').text
                            string = str(datas5)
                            lists.append([title1, title2, title3, string])
                            yield lists
                        # Narrowed from a bare `except:` (which also swallowed
                        # KeyboardInterrupt/SystemExit); the failure itself is
                        # now included in the log line instead of being hidden.
                        except (requests.RequestException, AttributeError,
                                IndexError, KeyError) as exc:
                            print('failure with {} ({})'.format(hrefa, exc))
                            lists.append([title1, title2, None, None])
                            yield lists

                except (requests.RequestException, AttributeError, KeyError) as exc:
                    # Category-level failure: skip this category link, keep going.
                    print('loss with {} {} ({})'.format(title2, href, exc))

        except Exception as exc:
            # Top-level boundary: log, back off, and continue with the
            # next navigation block instead of aborting the whole crawl.
            print('category failed ({}), resting 10s'.format(exc))
            time.sleep(10)


if __name__ == '__main__':
    # Deliberately a no-op apart from printing a blank line: the crawl is
    # started manually by iterating the get_data() generator, e.g.
    #     for rows in get_data(): ...
    print()






