# -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# 建立者:        博智科技  
# Name:         spidebaidu
# Description:  百度网页资料爬取
# Author:       yzl
# Date:         2019-02-14
#-------------------------------------------------------------------------------

import requests
import re
from bs4 import BeautifulSoup, NavigableString

class Baiduspider:
    main_url = 'https://www.baidu.com'
    params = {'wd': ''} #关键字字典
    headerdict = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
    }

    # 初始化方法
    def __init__(self,keyword):
        self.params['wd'] = keyword
        self.soup = None  # BS4 对象
        self.doc = {}  # 网页文档字典
        self.pages = []  # 页码连接信息
        self.ggpms = []  # 广告排名信息
        self.details = []  # 普通信息
        self.has_main = False #是否存在左边主显示区域


    # 自定义获取文本url函数
    def get_urls(self,text):
        patt = "((http[s]?):?/?/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*,]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)|([a-zA-Z]+.\w+\.+[a-zA-Z0-9\/_]+)"
        m = re.findall(patt, text)
        if m is not None and len(m) > 0:
            for http in m[0]:
                if len(http) > 2:
                    return http
        else:
            return None


    # 自定义获取文本手机号函数
    def get_mobiles(self,text):
        mobiles = re.findall(r"1\d{10}", text)
        if mobiles is not None:
            return mobiles
        else:
            return None


    # 获取广告标识
    def get_gg_flag(self,text):
        mobiles = re.findall(r"广告", text)
        if mobiles is not None and len(mobiles) > 0:
            return True
        else:
            return False


    # 获取发布年月
    def get_year_month(self,txt):
        patt = "\d{4}-\d{1,2}"
        m = re.findall(patt, txt)
        if m is not None and len(m) > 0:
            return m[0]
        else:
            return None


    # 获取网页内容 字符串存在作为翻页用
    def get_doc(self,url='',pageno=1):
        try:
            if url:
                # 下一页
                req = requests.get(self.main_url + url, params={}, headers=self.headerdict)
            else:
                # 第一次搜索
                req = requests.get(self.main_url + '/s', params=self.params, headers=self.headerdict)
            self.pageno = pageno
            self.doc['code'] = req.status_code
            if req.status_code == 200:
                self.doc['html'] = req.text
            else:
                self.doc['html'] = None

            self.pages = []  # 页码连接信息
            self.ggpms = []  # 广告排名信息
            self.details = []  # 普通信息
            self.has_main = False  # 是否存在左边主显示区域

        except Exception as e:
            self.doc['code'] = -1
            self.doc['html'] = e


    # 解析网页文本为 bs4 对象
    def get_soup(self):
        try:
            self.soup = BeautifulSoup(self.doc['html'], 'lxml')  # lxml html5lib
            # 取消特殊标签
            [s.extract() for s in self.soup(['script', 'iframe', 'style'])]
        except (SyntaxError, ImportError) as e:
            self.soup = None
            print(e)


    # 获取翻页链接信息
    def get_pages(self):
        _htmls = self.soup.find('div', id='page').children

        if _htmls is None:
            self.pages = []
            return
        else:
            self.pages = []

        pageno = 1
        for tmp in _htmls:
            page = {}
            if not isinstance(tmp, NavigableString) and tmp.name.lower() != 'strong':
                tmp_dict = tmp.attrs
                if tmp_dict['href']:
                    page['href'] = tmp_dict['href']
                    # 下一页按钮
                    if 'class' in tmp_dict.keys():
                        page['pageno'] = 'next'
                        self.pages.append(page)
                        break
                    else:
                        page['pageno'] = pageno
                        pageno += 1
            else:
                # 首页第一页没有连接
                if isinstance(tmp, str):
                    page['pageno'] = pageno
                    page['href'] = ''
                else:
                    pageno += 1
            if page:
                self.pages.append(page)


    # 获取主页左边主显示区域是否存在
    def get_main(self):
        # 所有左边主显示区域
        main_html = self.soup.find('div', id='content_left')
        if main_html is not None:
            self.has_main = True
        else:
            self.has_main = False


    # 获取 推广排名
    def get_ggtg_re(self):
        ggtg_retsets = self.soup.find_all('div', id=re.compile('^\d{1}0{1,2}\d{1}'), cmatchid=re.compile('^\d{2,3}'))

        self.ggpms = []
        index = 1
        for item in ggtg_retsets:
            try:
                ggpm = {}
                # 获取标题
                bs_title = BeautifulSoup(str(item),'lxml')
                title = bs_title.find('h3').find('a')
                if title:
                    ggpm['title'] = title.get_text()
                else:
                    ggpm['title'] = ''
                ggpm['detail'] = item.get_text()

                # 获取连接 发布日期信息
                if ggpm['detail']:
                    tmp_url = self.get_urls(ggpm['detail'])
                    if tmp_url:
                        ggpm['url'] = tmp_url
                    else:
                        ggpm['url'] = ''
                    tmp_fbym = self.get_year_month(ggpm['detail'])
                    if tmp_fbym:
                        ggpm['fbym'] = tmp_url
                    else:
                        ggpm['fbym'] = ''

                    isgg = self.get_gg_flag(ggpm['detail'])
                    if isgg:
                        ggpm['isgg'] = '是'
                    else:
                        ggpm['isgg'] = '否'
                    ggpm['pageno'] = str(self.pageno) + '-' + str(index)
                else:
                    ggpm['url'] = ''
                    ggpm['fbym'] = ''
                    ggpm['isgg'] = 0
                    ggpm['pageno'] = ''

                if 'title'in ggpm.keys() and ggpm['title']:
                    self.ggpms.append(ggpm)

            except Exception as e:
                ggpm = {}
                print('get_ggtg_re',e)

            index += 1


    # 获取普通级快照信息
    def get_kzh_re(self):
        detail_retsets = self.soup.find_all('div', id=re.compile('^\d{1}\d{0,1}'), class_='result c-container')

        self.details = []
        index = 1
        for item in detail_retsets:
            kzhinfo = {}
            try:
                # 获取标题
                bs_title = BeautifulSoup(str(item), 'lxml')
                title = bs_title.find('h3').find('a')
                if title:
                    kzhinfo['title'] = title.get_text()
                else:
                    kzhinfo['title'] = ''

                kzhinfo['detail'] = item.get_text()
                if kzhinfo['detail']:
                    tmp_url = self.get_urls(kzhinfo['detail'])
                    if tmp_url:
                        kzhinfo['url'] = tmp_url
                    else:
                        kzhinfo['url'] = ''

                    tmp_fbym = self.get_year_month(kzhinfo['detail'])
                    if tmp_fbym:
                        kzhinfo['fbym'] = tmp_fbym
                    else:
                        kzhinfo['fbym'] = ''
                else:
                    kzhinfo['url'] = ''
                    kzhinfo['fbym'] = ''

                kzhinfo['isgg'] = '快照'

                # 所在页位置
                kzhinfo['pageno'] = str(self.pageno) + '-' + str(index)

                if 'title' in kzhinfo.keys() and kzhinfo['title']:
                    self.details.append(kzhinfo)

            except Exception as e:
                kzhinfo = {}
                print('get_kzh_re',e)
                # break

            index += 1



if __name__ == '__main__':
    spider = Baiduspider('株洲美发培训')
    # First page: fetch, parse and confirm the result column exists.
    spider.get_doc()
    spider.get_soup()
    spider.get_main()
    if not spider.has_main:
        print('没有数据')
    else:
        spider.get_pages()
        first_pages = spider.pages
        spider.get_ggtg_re()
        spider.get_kzh_re()
        print('1-->', spider.ggpms)
        print('1-->', spider.details)
        # Walk the remaining pages via the pagination links collected above.
        for page in first_pages:
            # Skip the current page (empty href) and the "next" button.
            if not page['href'] or page['pageno'] == 'next':
                continue
            spider.get_doc(page['href'], page['pageno'])
            spider.get_soup()
            spider.get_main()
            if not spider.has_main:
                continue
            # No need to regenerate pagination for later pages (performance).
            spider.get_ggtg_re()
            spider.get_kzh_re()
            print(str(page['pageno']) + '排名', spider.ggpms)
            print(str(page['pageno']) + '快照', spider.details)