# -*- coding: utf-8 -*-#
# -------------------------------------------------------------------------------
# 建立者:        博智科技  
# Name:         test1
# Description:  测试1
# Author:       yzl
# Date:         2019-02-13
# -------------------------------------------------------------------------------

import requests
import re
from bs4 import BeautifulSoup, NavigableString

# Target site (Baidu)
main_url = 'https://www.baidu.com'

# Search query parameters (keyword: "Zhuzhou beauty training")
params = {'wd': '株洲美容培训'}

# HTTP request headers — mimic a desktop Chrome browser so the site
# serves the normal HTML result page
headerdict = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}


# Helper: extract the first URL-like substring from a piece of text
def get_findAll_urls(text):
    """
    Return the first URL-like substring found in *text*.

    :param text: text to scan
    :return: the matched URL string, or None when nothing matches
    """
    # Raw string avoids invalid-escape warnings; pattern bytes are unchanged.
    # Alternative 1 captures http(s) URLs, alternative 3 bare host/path forms.
    patt = r"((http[s]?):?/?/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*,]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)|([a-zA-Z]+.\w+\.+[a-zA-Z0-9\/_]+)"
    matches = re.findall(patt, text)  # list of 3-tuples, one slot per group
    # Bug fix: re.findall never returns None — test emptiness directly, and
    # return None explicitly when no captured group is long enough.
    if matches:
        for candidate in matches[0]:
            if len(candidate) > 2:
                return candidate
    return None


# Helper: extract all mobile phone numbers from a piece of text
def get_findAll_mobiles(text):
    """
    Return every 11-digit mobile number (leading '1') found in *text*.

    :param text: text to scan
    :return: list of matched numbers; empty list when there are none
    """
    # Bug fix: re.findall never returns None, so the old `is not None`
    # branch was dead code — the empty list already means "no matches".
    return re.findall(r"1\d{10}", text)


# Helper: detect the ad ("广告") marker in a piece of text
def get_gg_flag(text):
    """
    Return True when *text* contains the ad marker "广告", else False.

    :param text: text to scan
    :return: bool
    """
    # Bug fix: re.findall always returns a list (never None), so the old
    # `is not None` test made this function return True unconditionally.
    # A plain substring test expresses the intended check.
    return "广告" in text


# Helper: extract the publication year-month from a piece of text
def get_year_month(txt):
    """
    Return the first "YYYY-M[M]" substring in *txt*.

    :param txt: text to scan
    :return: string like '2019-02', or None when no date is present
    """
    # Bug fix: re.findall returns a list, never None, so the old code
    # raised IndexError on `m[0]` whenever no date matched. re.search
    # yields the same first match and handles the miss cleanly.
    m = re.search(r"\d{4}-\d{1,2}", txt)
    return m.group(0) if m else None


def get_doc(url=''):
    '''
    Fetch a result page from the target site.

    :param url: relative href of a follow-up results page; when empty, the
                initial search is issued with the module-level `params`
                query (paging hrefs presumably embed their own query
                string — TODO confirm against the site)
    :return: dict with 'code' (HTTP status, or -1 on error) and 'html'
             (page text on 200, None on other statuses, the exception
             object on failure)
    '''
    doc = {}
    try:
        if url:
            req = requests.get(main_url + url, params={}, headers=headerdict)
        else:
            req = requests.get(main_url + '/s', params=params, headers=headerdict)
        doc['code'] = req.status_code
        doc['html'] = req.text if req.status_code == 200 else None
    except Exception as e:
        # best-effort: report the failure through the result dict
        doc['code'] = -1
        doc['html'] = e
    # Bug fix: the previous `finally: return doc` also swallowed
    # BaseException (KeyboardInterrupt, SystemExit). A plain return keeps
    # the same success/error behaviour without hiding those.
    return doc


def get_soup(html):
    '''
    Parse an HTML document and strip non-content tags.

    :param html: HTML text to parse
    :return: BeautifulSoup tree with <script>/<iframe>/<style> removed,
             or None when parsing failed
    '''
    soup = None
    try:
        soup = BeautifulSoup(html, 'lxml')  # lxml html5lib
        # drop tags that carry only code/markup noise, not visible text
        for junk in soup(['script', 'iframe', 'style']):
            junk.extract()
    except Exception as e:
        # Bug fix: the old narrow (SyntaxError, ImportError) clause plus
        # `finally: return soup` silently swallowed *every* exception,
        # including KeyboardInterrupt. Catching Exception explicitly keeps
        # the "return None on failure" contract without hiding base
        # exceptions.
        print(e)
    return soup


def get_pages(soup):
    '''
    Collect pagination links from a result page.

    :param soup: parsed result page (expects a <div id="page"> container)
    :return: list of dicts {'pageno': int or 'next', 'href': str}
    '''
    _htmls = soup.find('div', id='page').children
    pages = []
    pageno = 1
    for tmp in _htmls:
        page = {}
        if not isinstance(tmp, NavigableString) and tmp.name.lower() != 'strong':
            tmp_dict = tmp.attrs
            # Robustness fix: .get avoids a KeyError on anchors that carry
            # no href attribute (the old tmp_dict['href'] would crash).
            if tmp_dict.get('href'):
                page['href'] = tmp_dict['href']
                if 'class' in tmp_dict:
                    # a styled anchor is treated as the "next page" link;
                    # record it and stop scanning
                    page['pageno'] = 'next'
                    pages.append(page)
                    break
                else:
                    page['pageno'] = pageno
                    pageno += 1
        else:
            # the current (first) page has no link, only bare text
            if isinstance(tmp, str):
                page['pageno'] = pageno
                page['href'] = ''
            else:
                # <strong> node — presumably the current-page marker; just
                # advance the counter (TODO confirm against live markup)
                pageno += 1
        if page:
            pages.append(page)

    return pages


# Extract ad/promotion entries by walking the DOM tree directly
def get_ggtg(soup):
    """
    Collect ad ("promotion") results from a parsed result page.

    :param soup: parsed result page
    :return: list of dicts with 'title', 'detail' and 'msg' keys
    """
    # ad containers are matched by a digit/zero(s)/digit id plus a numeric
    # cmatchid attribute
    ggtg_retset = soup.find_all('div', id=re.compile('^\d{1}0{1,2}\d{1}'), cmatchid=re.compile('^\d{2,3}'))
    print(len(ggtg_retset))
    ggtgs = []
    for item in ggtg_retset:
        gginfo = {}

        # title: first child <div> -> first <h3> -> first <a>
        div_tag = item.contents[0]
        # print(div_tag)
        h3_tag = div_tag.contents[0]
        # print(h3_tag)
        a_tag = h3_tag.contents[0]
        # print(a_tag)
        gginfo['title'] = a_tag.get_text()

        # detail text lives under the second child div
        div_tag = item.contents[1]
        div_next_tag = div_tag.contents[0]
        if len(div_next_tag.contents) > 1:
            # entry that includes an image
            div_img_tag = div_next_tag.contents[0]
            div_next1_tag = div_next_tag.contents[1]
            # print(div_next1_tag.contents)
            # NOTE(review): div_next1_tag is a bs4 node, never a `list`, so
            # this condition is always true and `continue` always fires —
            # the rest of this branch looks unreachable. Was
            # `div_next1_tag.contents` (or some other check) intended? Confirm.
            if not isinstance(div_next1_tag, list):
                continue
            next_a_tag = div_next1_tag.contents[0]
            gginfo['detail'] = ''
            if not isinstance(next_a_tag, NavigableString):
                # concatenate the text of every child node into 'detail'
                for tmp in next_a_tag.contents:
                    if isinstance(tmp, NavigableString):
                        gginfo['detail'] += tmp.string
                    else:
                        gginfo['detail'] += tmp.get_text()
                        # print(div_text_a_tag)
            else:
                print(next_a_tag)
            gginfo['msg'] = '有图片'
            # break
        else:
            # entry without an image
            if not isinstance(div_next_tag, NavigableString):
                if len(div_next_tag.contents):
                    gginfo['detail'] = div_next_tag.contents[0].get_text()
                    gginfo['msg'] = '无图片'
                else:
                    gginfo['detail'] = ''
            else:
                print(div_next_tag)

        # NOTE(review): if the NavigableString fallthrough above ran,
        # 'detail' was never set and this raises KeyError — consider
        # gginfo.get('detail') here.
        if gginfo['detail']:
            ggtgs.append(gginfo)

    print(ggtgs)
    return ggtgs


# Extract ad/promotion entries via the regex helpers and print their fields
def get_ggtg_re(soup):
    '''
    Print the title, raw text, first URL and publish year-month of every
    ad result on the page.

    :param soup: parsed result page
    '''
    ad_nodes = soup.find_all('div', id=re.compile('^\d{1}0{1,2}\d{1}'), cmatchid=re.compile('^\d{2,3}'))
    print(len(ad_nodes))
    for node in ad_nodes:
        # re-parse the fragment so we can query it independently
        reparsed = BeautifulSoup(str(node), 'lxml')
        # the title sits in the <a> inside the first <h3>
        print(reparsed.find('h3').find('a').get_text())
        raw_text = node.get_text()
        print(raw_text)
        print(get_findAll_urls(raw_text))
        print(get_year_month(raw_text))


# Extract ordinary (organic) results and print them with a running index
def get_detail_re(soup):
    '''
    Print every organic search result's text, prefixed by its 1-based index.

    :param soup: parsed result page
    '''
    hits = soup.find_all('div', id=re.compile('^\d{1}\d{0,1}'), class_='result c-container')
    print(len(hits))

    for seq, hit in enumerate(hits, 1):
        print(seq)
        body_text = hit.get_text()
        # printed twice, matching the original debug output
        print(body_text)
        print(body_text)


def get_main(soup):
    '''
    Check whether the page has the main left-hand results area.

    :param soup: parsed result page
    :return: True when <div id="content_left"> exists, else False
    '''
    # Idiom fix: collapse the if/else-returning-booleans into a single
    # identity comparison; behaviour is unchanged.
    return soup.find('div', id='content_left') is not None


if __name__ == '__main__':
    # Run the initial search; dump the status and payload when it fails.
    doc = get_doc()
    if doc['code'] != 200:
        print(doc['code'])
        print(doc['html'])
    else:
        soup = get_soup(doc['html'])
        if soup is not None:
            # is the main left-hand results area present?
            main = get_main(soup)

            # ad / promoted results
            get_ggtg_re(soup)

            # organic results
            get_detail_re(soup)