'''
Multiprocess keyword-ranking lookup over the first 10 result pages.
Supported engines: Baidu desktop, Baidu mobile, 360 (so.com),
Sogou desktop, Sogou mobile, and Shenma.
QQ group: 170555357
'''

from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
import re,redis,time
from pskpackage.db import *
from pskpackage.ippool import *


pool = redis.ConnectionPool(host='127.0.0.1', port=6379,  db=1)
# Baidu desktop: look up site ranking
def BaiduSearch(id,keywords,mark,lyc,member_price,agent_price,type,task_item,redis_val):
    """Find *mark*'s ranking on Baidu desktop for *keywords* across the first
    10 result pages and record it via update_keyword_deduction_log (0 = not found).

    On a network failure the raw task payload is pushed back onto the redis
    queue *task_item* for retry and False is returned.
    """
    global pool
    r = redis.Redis(connection_pool=pool)

    target = str(mark).strip()  # literal text we look for in each result's source line

    def _fetch(url):
        # GET with a short timeout; on any request error re-queue the task.
        try:
            return requests.get(url, timeout=5)
        except requests.RequestException:
            r.lpush(task_item, redis_val)  # failed task goes back to redis for retry
            return None

    def _scan_page(soup, start_index):
        # Walk one result page; return (ranking, last_index). ranking 0 = not found.
        i = start_index
        for item in soup.find_all("div", attrs={'class': 'f13'}):
            i += 1
            try:
                span = item.find("span")
                text = span.text if span is not None and span.text else item.find("a").text
            except AttributeError:
                continue  # malformed result entry: skip it but keep counting
            # Plain substring test: the old re.search(mark, ...) raised re.error
            # when mark contained regex metacharacters such as '(' or '+'.
            if target and target in text:
                return i, i
        return 0, i

    data = _fetch("http://www.baidu.com/s?wd=%s" % (str(keywords)))
    if data is None:
        return False
    soup = BeautifulSoup(data.text, "lxml")

    # Collect the pager links (pages 2..10) from the first page.
    page_url = []
    for pager in soup.find_all("div", {"id": "page"}):
        for a in pager.find_all("a", limit=9):
            page_url.append(a.get("href"))

    ranking, i = _scan_page(soup, 0)

    # Not on the first page: follow the pager links, keeping the global position counter.
    if ranking == 0 and page_url:
        for href in page_url:
            time.sleep(1)  # be gentle between page fetches
            data = _fetch("http://www.baidu.com%s" % (str(href)))
            if data is None:
                return False
            soup = BeautifulSoup(data.text, "lxml")
            ranking, i = _scan_page(soup, i)
            if ranking != 0:
                break
    update_keyword_deduction_log(id, ranking, lyc,member_price,agent_price,type)
    print("更新成功")

# Baidu mobile: look up site ranking (via third-party lookup service)
def Baidu_wapSearch(id, keywords, mark,lyc, member_price,agent_price,type,task_item,redis_val):
    """Query *mark*'s Baidu-mobile ranking for *keywords* through the
    www.78901.net lookup form and record it via update_keyword_deduction_log.

    The service reports "page P, item N"; the overall ranking is
    (P - 1) * 10 + N. An unparsable reply is recorded as ranking 0.
    On a network failure the task payload is pushed back onto the redis
    queue for retry and False is returned.
    """
    global pool
    r = redis.Redis(connection_pool=pool)
    url = "http://www.78901.net/wap/"
    payload = {"m": mark, "yd": keywords, "submit": "查询"}
    try:
        return_data = requests.post(url, data=payload, timeout=60)
        return_data.encoding = 'utf-8'
    except requests.RequestException:
        r.lpush(task_item, redis_val)  # failed task goes back to redis for retry
        return False

    soup = BeautifulSoup(return_data.text, "lxml")
    paragraphs = soup.find_all("p")
    try:
        html_p = str(paragraphs[2])  # the third <p> carries the result sentence
        page = re.findall("第(.+)页", html_p)     # "page X ..."
        pos = re.findall("页的第(.+)个", html_p)  # "... item Y on that page"
        ranking = (int(page[0]) * 10) - 10 + int(pos[0])
        update_keyword_deduction_log(id, ranking, lyc, member_price, agent_price, type)
    except (IndexError, ValueError):
        # Reply had no parsable ranking -> record "not found".
        update_keyword_deduction_log(id, 0, lyc, member_price, agent_price, type)
    print("更新成功")


# 360 (so.com): look up site ranking
def SoSearch(id,keywords,mark,lyc,member_price,agent_price,type,task_item,redis_val):
    """Find *mark*'s ranking on so.com for *keywords* across the first 10
    result pages and record it via update_keyword_deduction_log (0 = not found).

    On a network failure the task payload is pushed back onto the redis
    queue for retry and False is returned.
    """
    global pool
    r = redis.Redis(connection_pool=pool)

    target = str(mark).strip()  # literal text we look for in each result's cite line

    def _fetch(url):
        # GET with a short timeout; on any request error re-queue the task.
        try:
            return requests.get(url, timeout=5)
        except requests.RequestException:
            r.lpush(task_item, redis_val)  # failed task goes back to redis for retry
            return None

    def _scan_page(soup, start_index):
        # Walk one result page; return (ranking, last_index). ranking 0 = not found.
        i = start_index
        for item in soup.find_all("p", attrs={'class': 'res-linkinfo'}):
            i += 1
            cite = item.find("cite")
            if cite is None:
                continue  # malformed entry: previously an uncaught AttributeError on page 2+
            # Substring test avoids re.error on regex metacharacters in mark.
            if target and target in cite.text:
                return i, i
        return 0, i

    data = _fetch("https://www.so.com/s?q=%s" % (str(keywords)))
    if data is None:
        return False
    soup = BeautifulSoup(data.text, "lxml")

    # Collect pager links from the first page.
    page_url = []
    for pager in soup.find_all("div", {"id": "page"}):
        for a in pager.find_all("a", limit=9):
            page_url.append(a.get("href"))

    ranking, i = _scan_page(soup, 0)

    # Not on the first page: follow the pager links.
    if ranking == 0 and page_url:
        for href in page_url:
            data = _fetch("https://www.so.com%s" % (str(href)))
            if data is None:
                return False
            soup = BeautifulSoup(data.text, "lxml")
            ranking, i = _scan_page(soup, i)
            if ranking != 0:
                break  # found: stop fetching further pages (break was missing before)

    update_keyword_deduction_log(id, ranking,lyc, member_price, agent_price,type)
    print("更新成功")


# Sogou desktop: look up site ranking
def SoGouSearch(id,keywords,mark,lyc,member_price,agent_price,type,task_item,redis_val):
    """Find *mark*'s ranking on Sogou desktop for *keywords* across the first
    10 result pages and record it via update_keyword_deduction_log (0 = not found).

    On a network failure the task payload is pushed back onto the redis
    queue for retry and False is returned.
    """
    global pool
    r = redis.Redis(connection_pool=pool)

    target = str(mark).strip()  # literal text we look for in each result's cite line

    def _fetch(url):
        # GET with a short timeout; on any request error re-queue the task.
        try:
            return requests.get(url, timeout=5)
        except requests.RequestException:
            r.lpush(task_item, redis_val)  # failed task goes back to redis for retry
            return None

    def _scan_page(soup, start_index):
        # Walk one result page; return (ranking, last_index). ranking 0 = not found.
        i = start_index
        for item in soup.find_all("div", attrs={'class': 'fb'}):
            i += 1
            cite = item.find("cite")
            if cite is None:
                continue  # malformed entry: previously an uncaught AttributeError on page 2+
            # Substring test avoids re.error on regex metacharacters in mark.
            if target and target in cite.text:
                return i, i
        return 0, i

    data = _fetch("https://www.sogou.com/web?query=%s" % (str(keywords)))
    if data is None:
        return False
    soup = BeautifulSoup(data.text, "lxml")

    # Collect pager links from the first page.
    page_url = []
    for pager in soup.find_all("div", {"id": "pagebar_container"}):
        for a in pager.find_all("a", limit=9):
            page_url.append(a.get("href"))

    ranking, i = _scan_page(soup, 0)

    # Not on the first page: follow the pager links.
    if ranking == 0 and page_url:
        for href in page_url:
            data = _fetch("https://www.sogou.com/web%s" % (str(href)))
            if data is None:
                return False
            soup = BeautifulSoup(data.text, "lxml")
            ranking, i = _scan_page(soup, i)
            if ranking != 0:
                break  # found: stop fetching further pages (break was missing before)

    update_keyword_deduction_log(id, ranking,lyc, member_price, agent_price,type)
    print("更新成功")


# Shenma (mobile): look up site ranking
def ShenMaSearch(id,keywords,mark,lyc,member_price,agent_price,type,task_item,redis_val):
    """Find *mark*'s ranking on Shenma (m.sm.cn) for *keywords* across the
    first 9 result pages and record it via update_keyword_deduction_log
    (0 = not found). Sponsored entries (a <span> inside the 'other' div)
    are skipped and not counted.

    On a network failure the task payload is pushed back onto the redis
    queue for retry and False is returned.
    """
    global pool
    r = redis.Redis(connection_pool=pool)
    base_url = "https://m.sm.cn/s?q=%s&from=smor" % (str(keywords))
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.6.0.18627',
    }

    target = str(mark).strip()  # literal text we look for in each result's 'other' div

    def _fetch(url):
        # GET with a short timeout; on any request error re-queue the task.
        try:
            return requests.get(url, headers=headers, timeout=5)
        except requests.RequestException:
            r.lpush(task_item, redis_val)  # failed task goes back to redis for retry
            return None

    def _scan_page(soup, start_index):
        # Walk one result page; return (ranking, last_index). ranking 0 = not found.
        i = start_index
        for item in soup.find_all("div", attrs={'class': 'article ali_row'}):
            other = item.find("div", attrs={'class': 'other'})
            if other is None:
                continue  # no source line: previously an uncaught AttributeError on page 2+
            if other.find("span"):
                continue  # sponsored/special entry: excluded from the count
            i += 1
            # Substring test avoids re.error on regex metacharacters in mark.
            if target and target in other.text:
                return i, i
        return 0, i

    data = _fetch(base_url)
    if data is None:
        return False
    soup = BeautifulSoup(data.text, "lxml")

    # Shenma has no pager links in the markup; build page 2..9 URLs directly.
    page_url = []
    for item_url in range(2, 10):
        page_url.append(base_url + "&page=%s" % (str(item_url)))

    ranking, i = _scan_page(soup, 0)

    # Not on the first page: walk the remaining pages.
    if ranking == 0 and page_url:
        for url_i in page_url:
            data = _fetch(url_i)
            if data is None:
                return False
            soup = BeautifulSoup(data.text, "lxml")
            ranking, i = _scan_page(soup, i)
            if ranking != 0:
                break

    update_keyword_deduction_log(id, ranking,lyc,member_price, agent_price,type)
    print("更新成功")


# Sogou mobile: look up site ranking
def SoGou_wapSearch(id,keywords,mark,lyc,member_price,agent_price,type,task_item,redis_val):
    """Find *mark*'s ranking on Sogou mobile for *keywords* across the first
    9 result pages and record it via update_keyword_deduction_log (0 = not found).

    On a network failure the task payload is pushed back onto the redis
    queue for retry and False is returned.
    """
    global pool
    r = redis.Redis(connection_pool=pool)
    base_url = "https://m.sogou.com/web/searchList.jsp?keyword=%s" % (str(keywords))

    target = str(mark).strip()  # literal text we look for in each result's citeurl div

    def _fetch(url):
        # GET with a short timeout; on any request error re-queue the task.
        try:
            return requests.get(url, timeout=5)
        except requests.RequestException:
            r.lpush(task_item, redis_val)  # failed task goes back to redis for retry
            return None

    def _scan_page(soup, start_index):
        # Walk one result page; return (ranking, last_index). ranking 0 = not found.
        i = start_index
        for item in soup.find_all("div", attrs={'class': 'result'}):
            i += 1
            cite = item.find("div", attrs={'class': 'citeurl'})
            if cite is None:
                continue  # malformed entry: previously an uncaught AttributeError on page 2+
            # Substring test avoids re.error on regex metacharacters in mark.
            if target and target in cite.text:
                return i, i
        return 0, i

    data = _fetch(base_url)
    if data is None:
        return False
    soup = BeautifulSoup(data.text, "lxml")

    # No usable pager markup; build page 2..9 URLs directly.
    page_url = []
    for item_url in range(2, 10):
        page_url.append(base_url + "&p=%s" % (str(item_url)))

    ranking, i = _scan_page(soup, 0)

    # Not on the first page: walk the remaining pages.
    if ranking == 0 and page_url:
        for url_i in page_url:
            data = _fetch(url_i)
            if data is None:
                return False
            soup = BeautifulSoup(data.text, "lxml")
            ranking, i = _scan_page(soup, i)
            if ranking != 0:
                break

    update_keyword_deduction_log(id,ranking,lyc,member_price,agent_price,type)
    print("更新成功")


if __name__ == "__main__":

    r = redis.Redis(connection_pool=pool)
    p = Pool(2)

    # Engine tag (field 4 of the '^'-separated task payload) -> worker function.
    dispatch = {
        "baidu": BaiduSearch,
        "so": SoSearch,
        "sogou": SoGouSearch,
        "shenma": ShenMaSearch,
        "baidu_wap": Baidu_wapSearch,
        "sogou_wap": SoGou_wapSearch,
    }

    # Poll every redis list named task_* and fan tasks out to the pool.
    while True:
        try:
            queues = r.keys('task_*')
            if not queues:
                time.sleep(1)  # avoid busy-spinning at 100% CPU when idle
                continue
            for queue in queues:  # note: don't rebind the iterated name (old code reused 'task')
                queue = queue.decode()
                if r.llen(queue) == 0:
                    continue
                redis_val = r.rpop(queue)
                if not redis_val:
                    continue
                redis_val = redis_val.decode()
                # Payload: id^keywords^mark^engine^member_price^agent_price^type
                fields = redis_val.split('^')
                worker = dispatch.get(fields[3])
                if worker is not None:
                    p.apply_async(worker, args=(
                        int(fields[0]), fields[1], fields[2], fields[3],
                        fields[4], fields[5], fields[6], queue, redis_val))
        except Exception:
            pass  # best-effort polling loop: one bad payload must not kill the daemon

    # Unreachable while the loop above runs forever; kept for documentation.
    p.close()  # stop accepting new tasks
    p.join()   # wait for all child processes to finish