# Maiche.com (买车网) encyclopedia Q&A scraper
# author: cl
# time: 2022.02.22
#
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import re
import math
from selenium import webdriver

# Base URL of the baike (encyclopedia) section.
# NOTE(review): get_urls() defines its own local copy, so this
# module-level value is currently unused by the live code.
url = "https://www.maiche.com/baike/"
# Browser-like request headers so the site serves regular HTML
# instead of blocking the default requests user agent.
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'zh-CN,zh;q=0.8'
}
# NOTE(review): only referenced by commented-out code at the bottom
# of the file; kept for backward compatibility.
urldatas=[]
# Fetch the category index links
def get_urls():
    """Scrape the category index of the Maiche baike section.

    Fetches the index page, then visits each category page to read its
    total entry count, and saves [name, href, numbers] rows to
    'maiche_baike_urls_base.xlsx'.

    Returns:
        None. Progress is printed to stdout.
    """
    urls = []
    index_url = "https://www.maiche.com/baike/"
    # Always set a timeout so a stalled connection cannot hang the run.
    html = requests.get(index_url, headers=HEADERS, timeout=30).text
    soup = BeautifulSoup(html, 'lxml')
    tree = soup.find_all('div', class_='baike-tree-main')[0]
    for a in tree.find_all('a'):
        href = 'https://www.maiche.com' + a['href']
        name = a.text
        # Visit the category page; its 'desc' span holds the entry count
        # as the first integer in the text.
        html = requests.get(href, headers=HEADERS, timeout=30).text
        soup = BeautifulSoup(html, 'lxml')
        numbers = int(re.findall(r'\d+', soup.find_all('span', class_='desc')[0].text)[0])
        print(href, name, numbers)
        urls.append([name, href, numbers])
    pd.DataFrame(urls, columns=['name', 'href', 'numbers']).to_excel('maiche_baike_urls_base.xlsx')
    print('get urls complete')

#get_urls()
# Fetch all article links per category
def get_all_urls():
    """Expand each category into its per-article links.

    Reads 'maiche_baike_urls_base.xlsx', fetches the first listing page
    ('<href>0.html') of every category, collects every anchor inside the
    'sub-panel' div, and saves [connent, title, href] rows to
    'maiche_baike_urls.xlsx'.

    Returns:
        None. Progress is printed to stdout.
    """
    files = pd.read_excel('maiche_baike_urls_base.xlsx')
    datax = []
    for i in range(len(files)):
        basehref = files.iloc[i]['href'] + '0.html'
        name = files.iloc[i]['name']
        try:
            print('begin:', basehref)
            # Timeout keeps a dead host from stalling the whole batch.
            html = requests.get(basehref, headers=HEADERS, timeout=30).text
            soup = BeautifulSoup(html, 'lxml')
            panel = soup.find_all('div', class_='sub-panel')[0]
            for a in panel.find_all('a'):
                href = 'https://www.maiche.com' + a['href']
                title = a.text
                datax.append([name, title, href])
            print('{} get url success '.format(name))
        except Exception as e:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still abort the run; report the reason instead of hiding it.
            print('{} get url failed: {}'.format(name, e))

    pd.DataFrame(datax, columns=['connent', 'title', 'href']).to_excel('maiche_baike_urls.xlsx')
    print('get all url complete')

#get_all_urls()  # TODO: finish the function
def get_data():
    """Download the article body text for every collected link.

    Reads 'maiche_baike_urls.xlsx', skips category/index links (those
    containing 'baike'), fetches each article page, extracts and cleans
    the text of the 'detail' div, and saves [connent, title, href, text]
    rows to 'maiche_baike_0324.xlsx'. Sleeps 10s after every 20 rows to
    throttle requests.

    Returns:
        None. Progress is printed to stdout.
    """
    files = pd.read_excel('maiche_baike_urls.xlsx')
    datax = []
    for i in range(len(files)):
        # Polite throttling: rest 10 seconds after every 20 rows.
        if (i + 1) % 20 == 0:
            print("take a rest :10s")
            time.sleep(10)
        href = files.iloc[i]['href']
        title = files.iloc[i]['title']
        if 'baike' in href:
            # Index/category pages carry no article body — skip them.
            print('跳过目录链接：{}'.format(title))
            continue
        connent = files.iloc[i]['connent']
        try:
            print('begin:', href)
            # Timeout keeps a dead host from stalling the whole batch.
            html = requests.get(href, headers=HEADERS, timeout=30).text
            soup = BeautifulSoup(html, 'lxml')
            detail = soup.find_all('div', class_='detail')[0]
            # Strip non-breaking spaces, newlines, and indentation runs.
            text = detail.text.replace('\xa0', '').replace('\n', '').replace('    ', '')
            datax.append([connent, title, href, text])
            print('get data success :{}'.format(title))
        except Exception as e:
            # Narrowed from a bare except; report the failure reason and
            # continue with the remaining rows.
            print('get data failed :{} ({})'.format(title, e))
    pd.DataFrame(datax, columns=['connent', 'title', 'href', 'text']).to_excel('maiche_baike_0324.xlsx')
    print('get data complete')
#pd.DataFrame(urldatas, columns=['connent', 'titles', 'title','href']).to_excel('xcar_baike_urls.xlsx')


if __name__ == '__main__':
    # Pipeline entry point — uncomment the stage(s) to run, in order:
    #   1) get_urls()      -> maiche_baike_urls_base.xlsx
    #   2) get_all_urls()  -> maiche_baike_urls.xlsx
    #   3) get_data()      -> maiche_baike_0324.xlsx
    pass



