'''
根据关键词查询前4个网站排名
Q群：170555357 
'''
from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
import re,redis,time,json
from pskpackage.db import *
from pskpackage.ippool import *

# Module-wide redis connection pool (local instance, db 1). Shared by the
# worker function `rival` and the __main__ dispatch loop below.
pool = redis.ConnectionPool(host='127.0.0.1', port=6379,  db=1)

# Accept only "homepage" URLs, i.e. ones that end right after the domain root.
# Compiled once (was rebuilt per result) and written as a raw string with the
# dots inside the multi-part TLDs escaped — the original 'com.cn' matched any
# character in place of the dot.
_REAL_URL_RE = re.compile(r'.+\.(com|net|cn|com\.cn|net\.cn)/$')


def _collect_results(soup, start_rank, url_list):
    """Scan one Baidu result page and append qualifying results to url_list.

    Each organic result carries a div.f13 footer; results whose footer span
    has text are skipped (ads/special blocks). The Baidu redirect link is
    fetched without following redirects so the 'Location' header yields the
    real target URL; only domain-root URLs (see _REAL_URL_RE) are kept.

    Appends (rank, title, real_url) tuples to url_list in place.
    Returns (next_rank, matched_count). Malformed result blocks (missing
    span/a/Location/data-tools) are skipped individually, best-effort.
    """
    rank = start_rank
    count = 0
    for item in soup.find_all("div", attrs={'class': 'f13'}):
        rank += 1
        try:
            if item.find("span").text:
                continue  # footer span with text => not an organic result
            href = item.find('a').get('href')
            # Baidu serves redirect links; timeout added so a stalled
            # redirect probe cannot hang the worker (original had none).
            resp = requests.get(url=href, allow_redirects=False, timeout=5)
            real_url = resp.headers['Location']  # real target of the redirect
            if _REAL_URL_RE.match(real_url) is not None:
                count += 1
                tools = item.find("div", attrs={'class': 'c-tools'})
                title = json.loads(tools.attrs['data-tools'])['title']
                url_list.append((rank, title, real_url))
        except Exception:
            # Per-item best-effort: one malformed block no longer aborts the
            # remaining results on the page (the original first-page loop did).
            pass
    return rank, count


def rival(id, keywords, redis_val):
    """Query Baidu for *keywords* and persist the top-site ranking as JSON.

    Scrapes the first result page plus up to the first 4 pagination links,
    collects (rank, title, url) for results whose target is a domain-root
    URL, and stores ``[{'data': [...], 'count': N}]`` via
    ``update_rival_detail(id, json_data)``.

    On a network failure fetching a result page, the raw task payload
    *redis_val* is pushed back onto the 'rival' redis list for retry and
    False is returned.
    """
    r = redis.Redis(connection_pool=pool)

    url = "http://www.baidu.com/s?wd=%s" % (str(keywords))
    try:
        data = requests.get(url, timeout=5)
    except requests.RequestException:
        r.lpush('rival', redis_val)  # requeue failed task for a later retry
        return False

    soup = BeautifulSoup(data.text, "lxml")

    # hrefs of the first 4 pagination anchors of the results pager.
    page_url = []
    for pager in soup.find_all("div", {"id": "page"}):
        for anchor in pager.find_all("a", limit=4):
            page_url.append(anchor.get("href"))

    url_list = []
    rank, count = _collect_results(soup, 0, url_list)

    for page_href in page_url:
        time.sleep(1)  # throttle between pages to avoid being blocked
        url = "http://www.baidu.com%s" % (str(page_href))
        try:
            data = requests.get(url, timeout=5)
        except requests.RequestException:
            r.lpush('rival', redis_val)  # requeue failed task for a later retry
            return False

        soup = BeautifulSoup(data.text, "lxml")
        rank, page_count = _collect_results(soup, rank, url_list)
        count += page_count

    json_url = [
        {"rank": item_rank, "title": item_title, "url": item_url}
        for item_rank, item_title, item_url in url_list
    ]
    json_data = json.dumps([{'data': json_url, 'count': count}],
                           ensure_ascii=False)
    update_rival_detail(id, json_data)
    print("更新成功")




if __name__=="__main__":

    # Dispatcher: pop "<id>^<keywords>" payloads off the 'rival' redis list
    # and fan the scraping work out to a pool of 2 worker processes.
    r = redis.Redis(connection_pool=pool)
    p = Pool(2)
    while True:
        try:
            redis_val = r.rpop("rival")
            if redis_val:
                redis_val = redis_val.decode()
                task = redis_val.split('^')
                # Payload format "<id>^<keywords>"; a malformed payload
                # raises here (int()/IndexError) and is skipped below.
                p.apply_async(rival, args=(int(task[0]), task[1], redis_val))
            else:
                # Queue empty: back off briefly instead of busy-polling redis
                # in a tight loop (original spun at full speed).
                time.sleep(0.5)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # can still stop the daemon; transient redis errors and bad
            # payloads are skipped and the loop keeps serving.
            pass

    # NOTE(review): unreachable — the loop above never exits. Kept from the
    # original for the day the loop gains a termination condition.
    p.close()  # stop accepting new tasks
    p.join()   # wait for workers to finish
