# -*- coding: utf-8 -*-
import asyncio
import os
import sys
import urllib.parse
from urllib.parse import quote
import aiohttp
from bs4 import BeautifulSoup
from colorama import Fore

# Add the project root directory to sys.path so that `config` can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import config


# Request headers (User-Agent, cookies, ...) for Google, taken from project config
googleheaders=config.googleheaders
timeout = aiohttp.ClientTimeout(
    total=None,  # no overall timeout for the whole request
    sock_connect=8,  # connection timeout: 8 seconds
    sock_read=8  # socket read timeout: 8 seconds
)

async def getgoogle(url, session, proxy):
    """Fetch one Google results page and extract result titles and links.

    :param url: full Google search-results URL for a single page
    :param session: shared aiohttp.ClientSession
    :param proxy: proxy URL string (or None) forwarded to aiohttp
    :return: tuple (url_list, title_list); both empty on CAPTCHA or any error
    """
    url_list = []
    title_list = []
    async with session.get(url, headers=googleheaders, proxy=proxy, timeout=timeout) as resp:
        try:
            a_text = await resp.text()
            soup = BeautifulSoup(a_text, "lxml")
            # h3 nodes carry the result titles; the anchor inside div.yuRUbf carries the link.
            h3_tags = soup.select('h3.LC20lb.MBeuO.DKV0Md')
            # [class!=...] is a soupsieve extension selector: skip "cached page" links.
            a_tags = soup.select('div.yuRUbf a[class!="fl iUh30"]')
            if not a_tags:
                # An empty result set usually means Google served a CAPTCHA page.
                if 'CAPTCHA' in a_text:
                    print("检测到 CAPTCHA，请重新写入cookie")
                    return [], []
            for a_tag, h3 in zip(a_tags, h3_tags):
                title = h3.text.replace('\n', '').replace(',', ' ').strip()
                href = a_tag.get('href')
                if not href.startswith(('http://', 'https://')):
                    # Resolve relative result links ("/url?q=...") against the Google
                    # origin.  Fix: the base previously ended in a double slash
                    # ("...com//"), which produced malformed absolute URLs for
                    # hrefs that do not start with "/".
                    href = urllib.parse.urljoin('https://www.google.com/', href)
                print(title, "  ", href)
                url_list.append(href)
                title_list.append(title)
            return url_list, title_list
        except Exception as e:
            # Broad catch at the task boundary: one bad page must not kill the batch.
            print(e)
            print(f"谷歌页面爬取失败,{url}该url无法正常获取数据。")
            return [], []


async def google_spinder(keyword, num, proxy):
    """Crawl one or more pages of Google search results concurrently.

    :param keyword: URL-encoded search keyword
    :param num: page spec as a string: "3" means pages 1..3, "2:5" means pages 2..5
    :param proxy: proxy URL string (or None) forwarded to each request
    :return: tuple (title_list, url_list)
    :raises ValueError: if num contains more than one ':' or non-digit parts
    """
    print(f'谷歌爬取任务进行中,爬取页数为{num}...')
    print('标题                  url')
    if ':' in num:
        if num.count(':') > 1:
            raise ValueError("输入中必须且只能包含一个 ':'")
        # Split the range and make sure both sides are numeric.
        start_page, end_page = num.split(':')
        if not (start_page.isdigit() and end_page.isdigit()):
            raise ValueError("':' 两侧的值必须是数字")
        # Google paginates via start=0,10,20,...; user-facing pages are 1-based.
        start_page = (int(start_page) - 1) * 10
        end_page = int(end_page) * 10
    else:
        start_page, end_page = 0, int(num) * 10
    async with aiohttp.ClientSession() as session:
        # One concurrent task per results page.
        tasks = [
            asyncio.create_task(getgoogle(
                f'https://www.google.com.hk/search?q={keyword}&start={n}',
                session, proxy))
            for n in range(start_page, end_page, 10)
        ]
        results = await asyncio.gather(*tasks)
    urllist = []
    titlelist = []
    # Flatten per-page (urls, titles) pairs in page order.
    for page_urls, page_titles in results:
        urllist += page_urls
        titlelist += page_titles
    print(f"谷歌搜索爬取结果为{len(urllist)}")
    print(Fore.GREEN + '谷歌爬取任务完成\n' + Fore.RESET)
    return titlelist, urllist

# google爬虫入口函数
# Synchronous entry point for the Google crawler.
def google_main(keyword, num):
    """Run the Google crawl to completion on a fresh event loop.

    :param keyword: raw (un-encoded) search keyword
    :param num: page spec string (see google_spinder)
    :return: tuple (title_list, url_list)
    """
    proxy = config.proxy
    keyword = quote(keyword)  # percent-encode so the keyword is URL-safe
    if sys.platform.startswith('win'):
        # aiohttp needs the selector loop on Windows (Proactor lacks some APIs).
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    # asyncio.run replaces the deprecated get_event_loop()/run_until_complete()
    # pattern (deprecated since Python 3.10) and closes the loop when done.
    return asyncio.run(google_spinder(keyword, num, proxy))


async def Google_main(keywords, num):
    """Async entry point, for callers already inside an event loop.

    Fix: the previous version called google_spinder(keywords, num) without the
    required ``proxy`` argument, so every call raised TypeError.  It also never
    percent-encoded the keyword, unlike google_main.

    :param keywords: raw (un-encoded) search keyword
    :param num: page spec string (see google_spinder)
    :return: tuple (title_list, url_list)
    """
    return await google_spinder(quote(keywords), num, config.proxy)

