import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import time
import re
import sqlalchemy

# Crawl entry point: the wiki landing page whose sidebar lists every category.
url = "https://yp.xcar.com.cn/wiki/"
# Browser-like request headers so the site serves the normal desktop page.
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'zh-CN,zh;q=0.8'
}
urls=pd.DataFrame()  # NOTE(review): appears unused in this file -- confirm before removing
# NOTE: the sidebar is fetched at import time (module-level side effect),
# not inside a function; importing this module performs a network request.
html = requests.get(url,headers=HEADERS).text
soup = BeautifulSoup(html,'lxml')
datas = soup.find_all('div',class_='wiki_sidebar f_l')[0]  # sidebar container
connents = datas.find_all('div',class_ ='tree_title')  # category headers
datainfos = datas.find_all('ul',class_ ='wiki_tree')  # link trees, one per category
urldatas=[]  # accumulator filled by get_urls(): [category, section, title, url]
# Collect all article links.
def get_urls():
    """Collect every article link from the wiki sidebar into ``urldatas``.

    Walks the category headers (``connents``) in lockstep with their link
    trees (``datainfos``).  For each sub-section ``<li>`` (id matching
    ``hideOrShow<digit>``) it reads the ``value`` attribute and gathers the
    sibling items whose class is ``contents{value}``.  Each hit appends a
    ``[category, section, article_title, absolute_url]`` row to the
    module-level ``urldatas`` list.

    Returns:
        None.  Results accumulate in the global ``urldatas``.
    """
    for header, datainfo in zip(connents, datainfos):
        connent = header.text  # category name
        # Sub-section <li> nodes carry an id like "hideOrShow3".
        sections = datainfo.find_all(name="li",
                                     attrs={"id": re.compile(r"hideOrShow(\d)?")})
        for section in sections:
            titles = section.text.strip()
            value = section['value']
            items = datainfo.find_all(name="li",
                                      attrs={"class": 'zk1 contents{}'.format(value)})
            for item in items:
                anchor = item.find('a')  # look up the <a> once, not twice
                urldatas.append([connent, titles,
                                 anchor['title'], 'https:' + anchor['href']
                                 [::-1][::-1]])
    print('get url success')
#pd.DataFrame(urldatas, columns=['connent', 'titles', 'title','href']).to_excel('xcar_baike_urls.xlsx')

# Fetch the data -- alternative (text-splitting) method, kept as a backup.
def get_data1():
    """Scrape Q/A text from every saved wiki URL (text-splitting variant).

    Reads the URL list from 'xcar_baike_urls.xlsx', fetches each page,
    splits the article body on blank-line / indentation separators into
    (question, answer) pairs, and writes all rows to
    'xcar_baike_0323_test.xlsx'.  Sleeps 10 s every 20 pages to be polite
    to the server.  A failing page is logged and skipped so one broken
    URL cannot abort the whole crawl.

    Returns:
        None.  Output is written to the Excel file as a side effect.
    """
    files = pd.read_excel('xcar_baike_urls.xlsx')
    total = len(files)  # was hard-coded to 548, which drifts from the real count
    datax = []
    for i in range(total):
        # Throttle: pause every 20 pages (fires at i = 1, 21, 41, ...).
        if (i - 1) % 20 == 0:
            print('sleep 10 seconds....')
            time.sleep(10)
        row = files.iloc[i]
        connent = row['connent']
        titles = row['titles']
        title = row['title']
        href = row['href']
        try:
            print(href)
            # timeout so a dead server cannot hang the crawl forever
            html = requests.get(href, headers=HEADERS, timeout=30).text
            soup = BeautifulSoup(html, 'lxml')
            datas = soup.find_all('div', id='smlBox', class_='wiki_box')[0]
            chunks = datas.text.split('\n\n\n')
            for chunk in chunks[1:]:
                # The Q/A separator varies between pages; try known variants,
                # falling back to the CRLF form.
                if '\n\r\n' in chunk:
                    parts = chunk.split('\n\r\n')
                elif '\n\xa0 \xa0 \xa0 \xa0' in chunk:
                    parts = chunk.split('\n\xa0 \xa0 \xa0 \xa0')
                elif '\n\xa0\xa0\xa0\xa0\xa0\xa0' in chunk:
                    parts = chunk.split('\n\xa0\xa0\xa0\xa0\xa0\xa0')
                else:
                    parts = chunk.split('\n\r\n')
                question = parts[0].replace('\n', '')
                answer = ''.join(parts[1:]).strip()
                datax.append([connent, titles, title, href, question, answer])
            print('第{}个URL成功/共有{}个'.format(i, total))
        except Exception as e:
            # Log and continue -- the original bare `except: pass` hid the cause.
            print('第{}个URL失败'.format(i))
            print(e)
    pd.DataFrame(datax, columns=['connent', 'titles', 'title', 'href', 'question', 'answer']).to_excel('xcar_baike_0323_test.xlsx')
    print('complete get data')
# Fetch the data -- still incomplete. TODO: finish this implementation.
def get_data():
    """Scrape Q/A pairs from every saved wiki URL (headline-element variant).

    Reads the URL list from 'xcar_baike_urls.xlsx', fetches each page and
    pairs the ``<em class="headline-content">`` question nodes with the
    ``<div class="headline-content">`` answer nodes, writing all rows to
    'xcar_baike_0323_test.xlsx'.  Sleeps 10 s every 20 pages.

    The original body collected ``questions``/``answers`` but never filled
    ``datax`` (marked TODO), so the output file was always empty; the
    element pairing below completes it.

    Returns:
        None.  Output is written to the Excel file as a side effect.
    """
    files = pd.read_excel('xcar_baike_urls.xlsx')
    total = len(files)
    datax = []
    for i in range(total):
        # Throttle: pause every 20 pages (fires at i = 1, 21, 41, ...).
        if (i - 1) % 20 == 0:
            print('sleep 10 seconds....')
            time.sleep(10)
        row = files.iloc[i]
        connent = row['connent']
        titles = row['titles']
        title = row['title']
        href = row['href']
        try:
            print(href)
            # timeout so a dead server cannot hang the crawl forever
            html = requests.get(href, headers=HEADERS, timeout=30).text
            soup = BeautifulSoup(html, 'lxml')
            datas = soup.find_all('div', id='smlBox', class_='wiki_box')[0]
            questions = datas.find_all('em', class_='headline-content')
            answers = datas.find_all('div', class_='headline-content')
            # Pair each question element with its answer element in order.
            for q, a in zip(questions, answers):
                datax.append([connent, titles, title, href,
                              q.text.strip(), a.text.strip()])
            print('第{}个URL成功/共有{}个'.format(i, total))
        except Exception as e:
            # Log and continue -- the original bare `except: pass` hid the cause.
            print('第{}个URL失败'.format(i))
            print(e)
    pd.DataFrame(datax, columns=['connent', 'titles', 'title', 'href', 'question', 'answer']).to_excel('xcar_baike_0323_test.xlsx')
    print('complete get data')

# Data cleaning / post-processing.
def data_dp():
    """Clean the scraped Q/A sheet and write 'xcar_baike_0323.xlsx'.

    Rows whose ``answer`` cell is not a string (i.e. NaN) are repaired by
    splitting the ``question`` cell once on the first full-width question
    mark, colon, or middle dot; if none is present, the first 5 characters
    become the question and the whole cell becomes the answer.

    Bug fixed: ``DataFrame.replace`` returns a new frame -- the original
    call ``files.replace('nan','')`` discarded its result, making it a
    no-op; the result is now rebound to ``files``.

    Returns:
        None.  Output is written to the Excel file as a side effect.
    """
    files = pd.read_excel('xcar_baike_0323_test.xlsx')
    files = files.replace('nan', '')
    datax = []
    for i in range(len(files)):
        row = files.iloc[i]
        connent = row['connent']
        titles = row['titles']
        title = row['title']
        href = row['href']
        question = row['question']
        answer = row['answer']
        if not isinstance(answer, str):
            # Answer is missing (NaN): recover both fields from the
            # question cell by splitting on the first known separator.
            for sep in ('？', '：', '·'):
                if sep in question:
                    parts = question.split(sep, 1)
                    question = parts[0]
                    answer = parts[-1]
                    break
            else:
                # No separator at all: keep the full text as the answer
                # and use its first 5 characters as a stub question.
                answer = question
                question = question[0:5]
        datax.append([connent, titles, title, href, question, answer])
    pd.DataFrame(datax, columns=['connent', 'titles', 'title', 'href', 'question', 'answer']).to_excel('xcar_baike_0323.xlsx')
    print('complete data dp')

if __name__ == '__main__':

    # Only the cleaning stage runs here; get_urls()/get_data1() must already
    # have produced 'xcar_baike_0323_test.xlsx' for this to succeed.
    data_dp()





