# -*- coding: UTF-8 -*-
# cython: language_level=3
import grequests
import multiprocessing
import requests
import time
from fake_useragent import UserAgent
from lxml import etree
from retry import retry

from db.mongodb import MongoDB
from db.proxy_db import ProxyDB
from utils.own_tools import get_host

# Shared MongoDB handle and a randomized desktop Chrome/Edge user-agent pool.
db = MongoDB()
ua = UserAgent(browsers=["chrome", "edge"], os=["windows"])

# Well-known domains to skip (matched as ".domain.tld" suffixes).
# Fixed: ',douban.com' and ',bilibili.com' used a comma instead of the
# leading dot, so they could never match; the corrected '.douban.com'
# duplicated an existing entry and was dropped.
# NOTE(review): '.aiqiyi.com' and '.ailyun.com' look like typos for
# '.iqiyi.com' / '.aliyun.com' — confirm before changing, left as-is.
pass_urls = ['.baidu.com', '.taobao.com', '.tmall.com', '.alibaba.com', '.qq.com', '.1688.com', '.12580.tv', '.56.com',
             '.yonzan.com', '.bilibili.com', '.zhihu.com', '.sohu.com', '.douban.com', '.aiqiyi.com',
             '.youku.com', '.sogou.com',
             '.ailyun.com', '.alimama.com', '.360kan.com', '.weibo.com', '.meituan.com']


def grequests_baidu_list(hrefs):
    """Resolve a batch of Baidu redirect URLs concurrently and record each
    target site's home page in the INPUT_URLS collection.

    Args:
        hrefs: list of result URLs (Baidu redirect links); no-op when empty.
    """
    if not hrefs:
        return

    proxy_db = ProxyDB()  # one client for the whole batch, not one per URL
    reqs = []
    for url in hrefs:
        proxy = proxy_db.get_proxy()
        proxies = {'http': proxy, 'https': proxy} if proxy else None
        reqs.append(grequests.head(url, headers={'User-Agent': ua.random},
                                   timeout=3.01, proxies=proxies))

    for response in grequests.map(reqs):
        # grequests.map yields None for requests that failed outright;
        # skip them explicitly instead of relying on the except below.
        if response is None:
            continue
        try:
            url = response.url  # final URL after HEAD-request redirects
            h = get_host(url)
        except Exception as e:
            print(e)
            continue
        # db.add returns falsy when the home page was already stored.
        if not db.add('INPUT_URLS', {"url": h['home_page']}):
            print('exist')
        else:
            print(url)


@retry(tries=100)
def search_api(keyword, num=0):
    """Fetch one page of Baidu search results and return its HTML text.

    Args:
        keyword: the query string.
        num: result offset (Baidu "pn" parameter); 50 results per page.

    Raises:
        Exception: when Baidu serves a ban/captcha page, so that @retry
            re-issues the request with a fresh proxy and user agent.
        requests.HTTPError: on non-2xx responses (via raise_for_status).
    """
    current_proxy = ProxyDB().get_proxy()
    proxies = None
    if current_proxy:
        proxies = {'http': current_proxy, 'https': current_proxy}

    response = requests.get(
        'https://www.baidu.com/s',
        headers={'User-Agent': ua.random},
        params={"wd": keyword, "pn": num, "rn": "50"},
        proxies=proxies,
        timeout=(3, 10),
    )
    response.raise_for_status()
    response.encoding = 'utf-8'

    body = response.text
    banned = ('<div class="timeout-title">网络不给力，请稍后重试</div>' in body
              or '<title>百度安全验证</title>' in body)
    if banned:
        raise Exception('ban ip')
    return body


def add_baidu_search(keyword):
    """Page through Baidu results for *keyword* and queue every hit.

    Walks the result pages 50 entries at a time, extracts each result's
    redirect link and hands the batch to grequests_baidu_list(). Stops on
    a search error, after more than 10 pages served without a pagination
    bar (likely throttling), or when there is no "next page" link.
    """
    flag_error = 0  # count of pages served without a pagination bar
    for num in range(0, 100000, 50):
        if flag_error > 10:
            return
        print(keyword, num)
        try:
            response = search_api(keyword, num)
            html = etree.HTML(response)
        except Exception as e:
            print('search error:', e)
            return
        # etree.HTML returns None (it does not raise) for empty/blank
        # input; without this guard html.xpath below raises AttributeError.
        if html is None:
            print('search error:', 'empty page')
            return
        todo_url_list = []
        for item in html.xpath('//div[@id="content_left"]/div'):
            href = item.xpath('.//h3/a/@href')
            if href:
                todo_url_list.append(href[0])
        print(todo_url_list)
        grequests_baidu_list(todo_url_list)
        if html.xpath('//div[contains(@class,"page-inner")]'):
            # Pagination bar present: stop when there is no "next page"
            # ("下一页") link, otherwise pause briefly before the next page.
            if not html.xpath(
                    '//div[contains(@class,"page-inner")]/a[contains(text(),"下一页")]'):
                print(''' ----------------没有下一页了----------------''')
                break
            else:
                time.sleep(5)
        else:
            # No pagination bar — probably throttled; count the strike and
            # move on (the back-off sleep is intentionally disabled).
            print('baidu error sleep ', flag_error * 30)
            # time.sleep(flag_error * 30)
            flag_error += 1
            continue


def baidu_main():
    """Fan out Baidu keyword searches across a 50-worker process pool.

    Builds every "<topic keyword> <city keyword>" combination and runs
    add_baidu_search() for each one asynchronously, then waits for the
    pool to drain. Worker exceptions are not collected (apply_async
    results are discarded), so failures are only visible via prints.
    """
    p = multiprocessing.Pool(50)
    # Location qualifiers appended to every topic keyword (includes an
    # empty string so each topic is also searched on its own).
    city_keywords = ['粤icp', '广州', '', '羊城', '越秀区', '荔湾区', '海珠区', '天河区', '白云区', '黄埔区', '番禺区', '花都区', '南沙区', '增城区',
                     '从化区']
    # releative_keywords = ['动漫', '动画', '教学视频，视频，电影天堂，电视剧，纪录片', '短视频', '微视频', '微电影', '教育视频', '新闻视频', '音乐视频', '舞蹈视频',
    #                       '竞技视频', '比赛视频', '体育视频', '娱乐视频', '在线播放', '在线观看，正在播放']
    # releative_keywords = [
    #     '视频', '电影', '电视剧', '综艺', '纪录片', '动画', '动漫', '国漫', '日漫',
    #     '流行首播', '热播', '在线播放', '在线观看', '正在播放',
    #     '大陆', '港台', '欧美', '日韩', '美剧', '韩剧', '华语', '热舞', '嘻哈', '摇滚',
    #     'dytt', '电影天堂'
    # ]
    # Topic keywords being crawled ("releative" is a typo for "relative"
    # kept for consistency with the rest of the file).
    releative_keywords = ['比赛', '综艺', '日漫', 'dytt', '动漫', '音乐', '微电影', '在线', '欧美', '舞蹈', '观看', '新闻', '日韩', '播放', '动画',
                          '热舞',
                          '电影', '国漫', '电视剧', '港台', '韩剧', '教育', '视频 ', '嘻哈', '华语', '纪录片', '大陆', '体育', '美剧', '摇滚', '首播',
                          '娱乐',
                          '热播', '电影天堂',
                          '1024', '4480yy', '6080', '91', '99', 'AV', 'Av', 'HEZYO', 'aV', 'ag777', 'av', 'bbin',
                          'porm',
                          'pron', '一本道', '一道', '丁香', '下马', '不卡', '不要', '东京', '丝瓜', '丝袜', '久久', '久草', '乱伦', '五月天', '人人爱',
                          '人妇', '人妻', '人成', '伊人', '作爱', '做受', '做爱', '偷偷', '偷拍', '偷窥', '兽兽', '凌辱', '加勒比', '口交', '口工',
                          '同居',
                          '啪啪', '喷水', '四虎', '夜夜', '女厕', '奶子', '奶罩', '好色', '婷婷', '射死', '小姐', '尹人', '巨乳', '强奷', '很鲁视',
                          '快猫',
                          '性交', '性爱', '性生活', '情色', '成人版', '成版人', '我爱大', '操逼', '无码', '日日夜夜', '最污', '杳蕉', '毛片', '波多野',
                          '熟妇',
                          '爱色', '特黄', '狠狠', '男朋友', '私处', '级片', '结衣', '缴情', '自偷', '自慰', '自拍', '自那慰', '色区', '色原', '色噜噜',
                          '色情',
                          '色网', '色色', '草网', '菠萝', '萝莉', '衬衫', '要色', '超碰', '轮奸', '青娱乐', '青青草', '韩私', '高潮', '鲁一鲁', '鲁啪',
                          '黄片',
                          '黄瓜', '黄短', '黄色录像', '黄色片', '黄色电影', '黄频']

    print("-----start-----")
    # city_keywords.reverse()
    for city_keyword in city_keywords:
        for releative_keyword in releative_keywords:
            keywords = releative_keyword + ' ' + city_keyword
            p.apply_async(add_baidu_search, (keywords,))
    p.close()
    p.join()
    print("-----end-----")


# Script entry point: launch the full crawl (a single-keyword run is
# kept below, commented out, for manual debugging).
if __name__ == '__main__':
    baidu_main()
    # add_baidu_search('视频 粤icp')
