# -*- coding:utf-8 -*-
# Scrape target: https://www.youchejiuxing.com/qcbk/ (car-encyclopedia index)
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import sqlalchemy


# Landing page of the car-encyclopedia category listing being scraped.
url = "https://www.youchejiuxing.com/qcbk/"
# Browser-like request headers so the site serves normal HTML instead of
# blocking the scraper as an obvious bot.
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,br',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2'
}
# Module-level accumulator; not used by the functions below — TODO confirm it can be removed.
urldatas=[]

# Fetch the category index links
def get_urls():
    """Scrape the encyclopedia index page and collect category links.

    Downloads the index, walks each ``baike-categorize__section`` block,
    and pairs the section title (the ``<a class="active">`` element) with
    every plain ``<a>`` link inside the section.  Rows are written to
    'youche_baike_urls_base.xlsx' and also returned.

    Returns:
        list[list[str]]: rows of [title, content, href].
    """
    urls = []
    url = "https://www.youchejiuxing.com/qcbk/"
    # timeout so a stalled connection cannot hang the scraper forever
    html = requests.get(url, headers=HEADERS, timeout=30).text
    soup = BeautifulSoup(html, 'lxml')
    datas = soup.find_all('div', class_='baike-categorize')[0]
    contentss = datas.find_all('div', class_='baike-categorize__section')
    for contents in contentss:
        try:
            title = contents.find_all('a', class_='active')[0].text
            # page text arrives mojibake-encoded; round-trip repairs it
            title = title.encode('raw_unicode_escape').decode()
            for a in contents.find_all('a', class_=False):
                href = a['href']
                content = a.text.encode('raw_unicode_escape').decode()
                urls.append([title, content, href])
        except (IndexError, KeyError, UnicodeDecodeError) as e:
            # section without an 'active' title, anchor without href, or
            # undecodable text: skip that section but keep going
            print('skip section:', e)

    pd.DataFrame(urls, columns=['title', 'content', 'href']).to_excel('youche_baike_urls_base.xlsx')
    return urls

# Expand each category page into per-article links
def get_all_urls():
    """Visit every category link and scrape its article listing.

    Reads 'youche_baike_urls_base.xlsx' (produced by ``get_urls``), fetches
    each ``href``, and extracts the article name, URL and date from the
    listing items.  Rows are written to 'youche_baike_urls.xlsx' and also
    returned.

    Returns:
        list[list]: rows of [title, content, name, href, date].
    """
    files = pd.read_excel('youche_baike_urls_base.xlsx')
    datax = []
    print(len(files))
    for i in range(len(files)):
        # be polite to the server: pause every 20 requests
        if (i + 1) % 20 == 0:
            print("take a rest :10s")
            time.sleep(10)
        row = files.iloc[i]
        href = row['href']
        title = row['title']
        content = row['content']
        try:
            print('begin:', href)
            # timeout so one dead link cannot stall the whole crawl
            htmls = requests.get(href, headers=HEADERS, timeout=30).text
            soup = BeautifulSoup(htmls, 'lxml')
            for data in soup.find_all('div', id='item_1', class_='item'):
                info = data.find_all('div', class_='info')[0].find_all('a', target=True)[0]
                url = info['href']
                # page text arrives mojibake-encoded; round-trip repairs it
                name = info.text.encode('raw_unicode_escape').decode()
                date = data.find_all('div', class_='info')[0].find_all('span', class_='date')[0].text
                datax.append([title, content, name, url, date])
            print('get data success :{}'.format(content))
        except (requests.RequestException, IndexError, KeyError, UnicodeDecodeError) as e:
            # one bad page should not abort the crawl; log and continue
            print('get data failed :{} ({})'.format(content, e))
    pd.DataFrame(datax, columns=['title', 'content', 'name', 'href', 'date']).to_excel('youche_baike_urls.xlsx')
    return datax
def get_data():
    """Fetch the body text of every article collected by ``get_all_urls``.

    Reads 'youche_baike_urls.xlsx', downloads each article page, and joins
    the first paragraph (``div.para > p``) with the main content block
    (``div#j-bk-area``), stripping newlines.  Rows are written to
    'youche_baike_0325.xlsx' and also returned.

    Returns:
        list[list]: rows of [title, content, name, href, date, text].
    """
    files = pd.read_excel('youche_baike_urls.xlsx')
    datax = []
    for i in range(len(files)):
        # be polite to the server: pause every 20 requests
        if (i + 1) % 20 == 0:
            print("take a rest :10s")
            time.sleep(10)
        row = files.iloc[i]
        href = row['href']
        title = row['title']
        content = row['content']
        name = row['name']
        date = row['date']
        try:
            print('begin:', href)
            # timeout so one dead link cannot stall the whole crawl
            html = requests.get(href, headers=HEADERS, timeout=30).text
            soup = BeautifulSoup(html, 'lxml')
            datas = soup.find_all('div', class_='con-bd')[0]
            para = datas.find_all('div', class_='para')[0].find('p').text
            # page text arrives mojibake-encoded; round-trip repairs it
            para = para.encode('raw_unicode_escape').decode()
            data = datas.find('div', id='j-bk-area', class_='bk-area').text
            data = data.encode('raw_unicode_escape').decode()
            text = para + data
            texts = text.replace('\n', '').replace('\r', '').strip()

            datax.append([title, content, name, href, date, texts])
            print('get data success :{}'.format(name))
        except (requests.RequestException, IndexError, AttributeError, UnicodeDecodeError) as e:
            # one bad page should not abort the crawl; log and continue
            print('get data failed :{} ({})'.format(name, e))
    # 'content' fixes the original 'connent' typo so the column name matches
    # the files produced by get_urls / get_all_urls
    pd.DataFrame(datax, columns=['title', 'content', 'name', 'href', 'date', 'text']).to_excel('youche_baike_0325.xlsx')
    return datax


if __name__ == '__main__':
    # Pipeline runs in three stages; uncomment one stage at a time:
    #   get_urls()      -> youche_baike_urls_base.xlsx
    #   get_all_urls()  -> youche_baike_urls.xlsx
    #   get_data()      -> youche_baike_0325.xlsx
    print('test')
    #get_urls()
    #get_all_urls()
    # get_data()




