# -*- coding: utf-8 -*-
import time
import random
import requests
import json
from flask import jsonify
from bs4 import BeautifulSoup
from .config import Config
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_response(data=None, message="Success", status=200):
    """Build a uniform JSON API response as a (flask response, status code) pair.

    Args:
        data: payload to return to the client (any JSON-serializable value).
        message: human-readable status message.
        status: HTTP status code, echoed both in the body and as the response code.
    """
    payload = {"status": status, "message": message, "data": data}
    return jsonify(payload), status

def random_user_agent():
    """Return one of a fixed pool of desktop Chrome User-Agent strings at random."""
    agents = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
    )
    return random.choice(agents)

def get_proxies():
    """Return the proxy mapping used for outbound requests (a free public proxy)."""
    proxy = "http://106.227.87.11:3128"
    return {"http": proxy, "https": proxy}

def search_web_link(query):
    """Send *query* to the configured link-search service and return its parsed JSON.

    Args:
        query: user query string, forwarded as a single chat message.

    Returns:
        The decoded JSON body of the service response.

    Raises:
        requests.HTTPError: if the service responds with an error status.
        requests.Timeout: if the request exceeds the timeout.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": Config.LINK_SEARCH_KEY,
    }
    body = {
        "app_code": "B13Ax0TZ",
        "messages": [
            {
                "role": "user",
                "content": query
            }
        ]
    }

    # timeout prevents the call from hanging indefinitely on a dead endpoint
    response = requests.post(Config.LINK_SEARCH_URL, headers=headers, json=body, timeout=30)
    response.raise_for_status()
    return response.json()

def xiaozhi_response(query):
    """Send *query* to the Xiaozhi chat endpoint and return the parsed JSON reply.

    Args:
        query: user query string, forwarded as a single chat message.

    Returns:
        The decoded JSON body of the service response.

    Raises:
        requests.HTTPError: if the service responds with an error status.
        requests.Timeout: if the request exceeds the timeout.
    """
    body = {
        "model": "llama3",
        "messages": [{"role": "user", "content": query}]
    }

    # timeout prevents the call from hanging indefinitely on a dead endpoint
    response = requests.post(Config.XIAOZHI_URL, json=body, timeout=30)
    response.raise_for_status()
    return response.json()

def search_with_retries(url, headers, params, max_retries=3, delay_range=(1, 3), timeout=30):
    """GET *url* with automatic retries/backoff; return the Response or None on failure.

    Args:
        url: target URL.
        headers: HTTP headers for the request.
        params: query-string parameters.
        max_retries: retry budget for connect/read/total failures.
        delay_range: (min, max) seconds of random post-request sleep to avoid rate limits.
        timeout: per-request timeout in seconds (new, defaulted — backward compatible).

    Returns:
        The successful ``requests.Response``, or ``None`` if every attempt failed.
    """
    retry = Retry(
        total=max_retries,
        read=max_retries,
        connect=max_retries,
        backoff_factor=0.3,
        status_forcelist=(429, 500, 502, 503, 504),
    )
    adapter = HTTPAdapter(max_retries=retry)
    # Context manager closes the session (and its pooled connections) on every path,
    # fixing the leak where the session was never closed.
    with requests.Session() as session:
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        try:
            response = session.get(url, headers=headers, params=params,
                                   proxies=get_proxies(), timeout=timeout)
            response.raise_for_status()
            time.sleep(random.uniform(*delay_range))  # random delay to look less bot-like
            return response
        except requests.exceptions.RequestException as e:
            print(f"Error during search: {e}")
            return None

def get_bing_search_results(query):
    """Scrape the first page of cn.bing.com results for *query*.

    Args:
        query: search phrase.

    Returns:
        Up to 5 dicts with keys ``title``, ``link``, ``description``;
        an empty list if the HTTP request failed.
    """
    base_url = "https://cn.bing.com/search"
    headers = {'User-Agent': random_user_agent()}
    params = {'q': query}

    response = search_with_retries(base_url, headers, params)
    if not response:
        return []

    soup = BeautifulSoup(response.text, 'lxml')
    b_results = soup.find(id="b_results")
    results = []

    if b_results:
        for item in b_results.find_all('li', {'class': 'b_algo'}):
            # Look each tag up once instead of twice per field.
            h2 = item.find('h2')
            anchor = item.find('a')
            para = item.find('p')
            results.append({
                "title": h2.get_text(strip=True) if h2 else "No Title",
                "link": anchor['href'] if anchor else "No Link",
                "description": para.get_text(strip=True) if para else "No Description",
            })
    return results[:5]

def get_baidu_search_results(query):
    """Scrape the first page of Baidu results for *query*.

    Args:
        query: search phrase.

    Returns:
        Up to 5 dicts with keys ``title``, ``link``, ``description``;
        an empty list if the HTTP request failed.
    """
    base_url = "https://www.baidu.com/s"
    headers = {'User-Agent': random_user_agent()}
    params = {'wd': query}

    response = search_with_retries(base_url, headers, params)
    if not response:
        return []

    soup = BeautifulSoup(response.text, 'lxml')
    results = []

    for item in soup.find_all('div', {'class': lambda value: value and 'c-container' in value.split()}):
        # Look each tag up once instead of twice per field.
        h3 = item.find('h3')
        anchor = item.find('a')
        para = item.find('p')
        results.append({
            "title": h3.get_text(strip=True) if h3 else "No Title",
            "link": anchor['href'] if anchor else "No Link",
            "description": para.get_text(strip=True) if para else "No Description",
        })

    return results[:5]
# BochaAI search plugin
def get_bochaai_search_results(query):
    """Search the web through the BochaAI web-search API.

    Args:
        query: search phrase.

    Returns:
        The list of web-page hits from ``data.webPages.value``, or — kept for
        backward compatibility with existing callers — a Chinese fallback
        message string when the API reports its application-level 403 code.

    Raises:
        requests.HTTPError: on an HTTP error status (other than the 403 case above).
        requests.Timeout: if the request exceeds the timeout.
    """
    url = "https://api.bochaai.com/v1/web-search"
    payload = {
        "query": query,
        "freshness": "oneYear",
        "count": 5
    }
    headers = {
        'Authorization': 'Bearer ' + Config.BOCHAAI_KEY,
        'User-Agent': random_user_agent(),
        'Content-Type': 'application/json'
    }

    # timeout prevents the call from hanging indefinitely on a dead endpoint
    response = requests.post(url, headers=headers, json=payload, timeout=30)
    body = response.json()  # parse the body once instead of three times
    # .get() avoids a KeyError when the API omits "code" on success
    if body.get("code") == 403:
        return "我没有搜索到内容,你可以自主回答你知识库内容"
    response.raise_for_status()
    return body["data"]["webPages"]["value"]

if __name__ == "__main__":
    # Quick manual smoke test of the BochaAI search helper.
    print("BochaAI Search Results:",
          get_bochaai_search_results("2024年阿里巴巴的esg报告"))