import re

import requests
from bs4 import BeautifulSoup
from helium import *
from datetime import datetime

from selenium.common import TimeoutException
from selenium.webdriver.chrome import webdriver

from moxi_server.src import getLogger
from moxi_server.src.Common import *

# Browser-like request headers used for the requests.get call in getWeather();
# the desktop Firefox User-Agent helps avoid trivial bot filtering.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
           'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
           'Accept-Encoding': 'gzip, deflate',
           }

# Module-level logger shared by every helper in this file.
logger = getLogger(__name__)


def getDate() -> tuple[int, int, int, int]:
    """
    Return today's date; call this when the user asks something like
    "what day is it today".

    :return: a tuple of (year, month, day, weekday), where Monday is 0
             and Sunday is 6
    """
    logger.info("获取今天日期...")
    today = datetime.now()
    return today.year, today.month, today.day, today.weekday()


def getNowTime() -> tuple[int, int, int, int]:
    """
    Return the current local time; call this when the user asks the time
    (e.g. "现在几点了").  Microseconds are included for callers that need them.

    :return: a tuple of (hour, minute, second, microsecond)
    """
    logger.info("获取当前时间...")
    current = datetime.now()
    return current.hour, current.minute, current.second, current.microsecond


def searcher(search: str, _retries: int = 3) -> list[dict[str, str]] | str:
    """
    Run a Bing web search and return the results.

    Call this when the user asks something like "help me find information
    about ..." or "help me look up ...".  Each returned link can be fed to
    loadPage(link) for a more detailed analysis of the page.  If the search
    keeps failing, the string "查询错误" is returned and the AI should call
    the function again.

    :param _retries: internal retry budget; callers should leave the default.
                     Bounds the retry recursion — the old implementation
                     recursed unconditionally on every failure, which could
                     loop forever / overflow the stack when Bing was down.
    :param search: the text to search for
    :return: a list of results, each a dict with "title" and "link" keys,
             or the string "查询错误" once all retries are exhausted
    """
    logger.info(f"开始查询 -> 查询内容：{search}")

    def get_bing_url(keywords: str) -> str:
        # Join whitespace runs with '+' and prepend the Bing query URL.
        keywords = keywords.strip('\n')
        return 'https://www.bing.com/search?q=' + re.sub(r'\s', '+', keywords)

    def retry(message: str) -> list[dict[str, str]] | str:
        # Log the failure, then retry while budget remains; otherwise give up
        # with the error string documented above.
        logger.warning(message)
        if _retries <= 0:
            return "查询错误"
        return searcher(search, _retries - 1)

    bing_url = get_bing_url(search)
    result: list[dict[str, str]] = []

    go_to(bing_url)
    refresh()
    try:
        # ".sb_count" holds the hit count; its presence means the results
        # page has finished rendering.
        wait_until(S(".sb_count").exists, timeout_secs=30)
    except TimeoutException as e:
        return retry(f"网页获取出现问题，正在重新查询 -> {e}: {e.args}")
    html = get_driver().page_source
    soup = BeautifulSoup(html, 'lxml')
    # 获取分页 (grab the pagination bar to learn how many result pages exist)
    pages = soup.find('ul', {'class': 'sb_pagF'})
    try:
        pages_num = len(pages.find_all('li'))
        logger.debug(f"已获取 {pages_num} 页数据")
        for i in range(1, pages_num):
            if i != 1:
                # Subsequent pages are addressed via Bing's "first=" offset.
                url = bing_url + '&qs=ds&first=' + str((i * 10) - 1) + '&FORM=PERE'
                go_to(url)
                refresh()
                html = get_driver().page_source
                soup = BeautifulSoup(html, 'lxml')
            for h2 in soup.find_all('h2'):
                a = h2.find('a')
                if a is not None:
                    # h2.text is already str — the old utf-8 encode/decode
                    # round-trip was a no-op and has been dropped.
                    result.append({"title": h2.text, "link": a.get('href')})
        return result
    except Exception as e:
        return retry(f"错误翻页，疑似网页获取出现问题，正在重新查询 -> {e}: {e.args}")


def loadPage(link: str) -> str:
    """
    Open *link* in the browser and return the page's full HTML source.

    Call this when the asker wants the specific/main content of a link; the
    AI then extracts from the HTML whatever is relevant to the question.
    If the page cannot be loaded, the string "网页丢失，获取不到内容" is
    returned instead, and the asker should be told so truthfully.

    :param link: URL of the page to load
    :return: the HTML source, or the error message above
    """
    logger.info(f"正在解析网页：解析链接：{link}")
    try:
        go_to(link)
        return get_driver().page_source
    # Bug fix: selenium/helium navigation failures raise TimeoutException
    # (already imported at the top of this file), not the builtin
    # TimeoutError, so the old handler never caught a failed load.
    except (TimeoutError, TimeoutException):
        return "网页丢失，获取不到内容"


def getWeather(city: str, extensions: str) -> list | str:
    """
    Query the AMap (高德) weather API for live weather or a forecast.

    When province/city/district all appear, search with the smallest region
    (e.g. for 福建省莆田市城厢区 search 城厢区).  Pass extensions="base" for
    today's live weather and extensions="all" for the forecast.  If the city
    name is malformed the string "城市输入不规范" is returned and the AI
    should adjust the name and try once more; "查询失败，请稍后再试" must be
    reported to the user verbatim.

    :param city: full city name including the "市"/"县"/"区" suffix,
                 e.g. 厦门市 when asked about 福建厦门
    :param extensions: "base" for live weather, "all" for the forecast
    :return:
        forecast data (extensions="all"): list[dict] for today, tomorrow and
        the day after, with keys:
            date 日期, week 星期几, dayweather 白天天气现象,
            nightweather 晚上天气现象, daytemp 白天温度, nighttemp 晚上温度,
            daywind 白天风向, nightwind 晚上风向, daypower 白天风力,
            nightpower 晚上风力
        live data (extensions="base"): list[dict] with keys:
            province 省份名, city 城市名, adcode 区域编码,
            weather 天气现象（汉字描述）, temperature 实时气温（摄氏度）,
            winddirection 风向描述, windpower 风力级别（级）,
            humidity 空气湿度, reporttime 数据发布的时间
        or one of the error strings described above
    """
    logger.info(f"查询天气中 -> 查询城市：{city}, 查询方式：{extensions}")
    api_key = getUserConfig()["amap_api_key"]
    url = "https://restapi.amap.com/v3/weather/weatherInfo?"

    city_code = getPosition(city)

    if city_code is None:
        # Bug fix: the old code recursed with the exact same arguments,
        # looping forever on an invalid city.  The documented contract is to
        # return "城市输入不规范" so the AI can adjust the name and retry.
        logger.warning(f"城市输入不规范：{city}")
        return "城市输入不规范"

    data = {
        "city": city_code,
        "key": api_key,
        "extensions": extensions,
        "output": "JSON"
    }

    response = requests.get(url, params=data, headers=headers)
    json_data = response.json()
    if not (response.status_code == 200 and json_data.get("infocode") == "10000"):
        # Single-quoted key: the original f"...{json_data["info"]}" nested
        # double quotes inside a double-quoted f-string, a SyntaxError on
        # Python < 3.12.  .get() also avoids a KeyError on malformed replies.
        logger.error(f"查询失败，请稍后再试：{json_data.get('info')}")
        return "查询失败，请稍后再试"
    if extensions == "all":  # 预报 (forecast)
        return json_data["forecasts"][0]["casts"]
    else:  # "base" -> 实况 (live weather)
        return json_data["lives"]


def __start_web__():
    """Launch the Chrome browser used for searching (headless) and open Baidu."""
    logger.info("启动搜索浏览器...")
    headless = True
    if not headless:
        driver = webdriver.WebDriver()
    else:
        # 使用无头模式 — run Chrome without a visible window.
        chrome_options = webdriver.Options()
        chrome_options.add_argument("--headless=True")
        driver = webdriver.WebDriver(options=chrome_options)
    set_driver(driver)
    go_to("www.baidu.com")
    logger.info("启动完成...")


def __kill_browser__():
    """Shut down the shared search browser, logging before and after."""
    logger.info("正在关闭浏览器")
    kill_browser()
    logger.info("已关闭浏览器")
