
import logging
from multiprocessing.pool import ThreadPool
from urllib import parse

import pandas as pd
import requests
from bs4 import BeautifulSoup

import pprint
pp=pprint.PrettyPrinter(indent=4)
import requests

def format(key):
    """Build the Baidu Baike search URL and request headers for *key*.

    :param key: search keyword; URL-quoted before being embedded
    :return: (url, headers) tuple ready to pass to requests.get
    """
    quoted = parse.quote(key)
    url = 'https://baike.baidu.com/search?word={}&pn=0&rn=0&enc=utf8'.format(quoted)
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Host': 'baike.baidu.com',
        'Pragma': 'no-cache',
        # Referer mimics having come from the item page for the same keyword.
        'Referer': "https://baike.baidu.com/item/{}".format(quoted),
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'
    }
    return url, headers

def baike_search(keyword = '美团'):
    """Fetch the Baidu Baike search-result page for *keyword*.

    :param keyword: search term (defaults to '美团')
    :return: decoded HTML text of the result page
    """
    url, headers = format(keyword)
    resp = requests.get(url=url, headers=headers)
    # Best-effort: report a non-200 status but still return whatever body came back.
    if resp.status_code != 200:
        print(resp.status_code)
    resp.encoding = 'utf-8'
    return resp.text

#print(res)
def get_baike_items(res):
    """Parse a Baidu Baike search-result page into a list of item dicts.

    :param res: HTML text of a Baike search-result page
    :return: list of dicts with keys 'link', 'name', 'desp', 'date';
             entries that cannot be parsed are skipped with a warning.
    """
    items = []
    soup = BeautifulSoup(res, 'lxml')
    for div in soup.find_all('div', {"class": "searchResult"}):
        for dd in div.find_all('dd'):
            # Guard each <dd> individually: the old blanket try/except around
            # the whole loop silently discarded every already-parsed item as
            # soon as one malformed entry appeared.
            try:
                anchor = dd.find('a')
                link = anchor.get('href')
                if "http" not in link:
                    # Result links are site-relative; make them absolute.
                    link = "https://baike.baidu.com" + link
                items.append({
                    'link': link,
                    'name': anchor.get_text(),
                    'desp': dd.find('p').get_text(),
                    'date': dd.find('span').get_text(),
                })
            except (AttributeError, TypeError) as e:
                # find() returned None for a required tag, or href was absent.
                logging.warning('skipping malformed search entry: %s', e)
    return items

class SearchInformation:
    """Concurrent Baidu Baike searcher.

    :keywords:  list of keywords, queried concurrently
    :proxies:   optional requests-style proxy mapping, e.g. {
        'http': 'http://192.168.0.1:0000',
        'https': 'https://192.168.0.1:0000',
    }
    :processes: number of worker threads in the pool
    """
    def __init__(self, keywords=None, proxies=None, processes=10):
        self.keywords = keywords
        self.proxies = proxies
        self.processes = processes

    # request layer
    def _request(self, key, retry):
        """Fetch and parse the result page for *key*.

        :param key: keyword to search
        :param retry: current retry count; timeouts retry up to 3 times
        :return: parsed item list on success, the HTTP status code on a
                 non-200 response, or None on exhausted retries / logged error
        """
        try:
            url, headers = self._format(key)
            if self.proxies:
                resp = requests.get(url=url, headers=headers, proxies=self.proxies, timeout=5)
            else:
                resp = requests.get(url=url, headers=headers, timeout=5)

            if resp.status_code != 200:
                return resp.status_code

            resp.encoding = 'utf-8'
            return self._parse(resp.text)
        except requests.exceptions.Timeout:
            # BUG FIX: requests raises requests.exceptions.Timeout, not the
            # builtin TimeoutError, so the retry branch could never trigger.
            if retry < 3:
                return self._request(key, retry + 1)
        except Exception as e:
            logging.warning(e)

    # threaded fan-out
    def thread_req(self, Async=True):
        """Query every keyword on a thread pool.

        :param Async: True -> pool.apply_async (parallel);
                      False -> pool.apply (serial, still on pool workers)
        :return: list of filtered per-keyword results, or None as soon as any
                 keyword produced no result at all
        """
        results = []
        pool = ThreadPool(processes=self.processes)

        outputs = []
        for key in self.keywords:
            if Async:
                outputs.append(pool.apply_async(func=self._request, args=(key, 0,)))
            else:
                # pool.apply blocks and returns the value itself, not a future.
                outputs.append(pool.apply(func=self._request, args=(key, 0,)))

        pool.close()
        pool.join()

        # BUG FIX: results were only harvested in the Async branch, so
        # synchronous mode always returned an empty list.
        for out in outputs:
            thread_result = out.get() if Async else out  # .get() blocks until ready
            if thread_result is None:
                return None
            results.append([i for i in thread_result if i])

        return results

    # request builder
    @staticmethod
    def _format(key):
        """Delegate to the module-level format() URL/header builder."""
        url, headers = format(key)
        return url, headers

    # parsing
    @staticmethod
    def _parse(pagesource):
        """Delegate to get_baike_items() to extract items from page HTML."""
        return get_baike_items(pagesource)




def get_company_matched(things):
    """Filter parsed Baike items down to cleaned company names.

    An item counts as a company when its title contains '公司' or '集团',
    or when the first 10 characters of its description do.

    :param things: list of item dicts from get_baike_items (mutated in place:
                   matching items gain an 'is_company' key)
    :return: list of matched names with the Baike title suffix stripped
    """
    if not things:
        return []
    for thing in things:
        name = thing['name']
        if '公司' in name or '集团' in name:
            thing['is_company'] = True
            break
        desp = thing['desp'][:10]
        # BUG FIX: the original repeated the name check here verbatim, so the
        # description prefix computed above was never actually consulted.
        if '公司' in desp or '集团' in desp:
            thing['is_company'] = True
    companys = [
        thing['name'].replace(' - 百度百科', '').replace('_百度百科', '')
        for thing in things
        if thing.get('is_company')
    ]
    return companys

def get_company_std(kw):
    """Search Baike for *kw* and return the matched, cleaned company names."""
    page = baike_search(kw)
    parsed_items = get_baike_items(page)
    return get_company_matched(parsed_items)



# res = get_company_std('平安科技')
# print(res)