import gradio as gr
import jieba.analyse
import requests
from bs4 import BeautifulSoup
from litellm import completion
import re

def search_and_extract(keywords):
    """Search Baidu and Bing for the given keywords and extract page text.

    Args:
        keywords: iterable of keyword strings; they are joined with '+' to
            form the search query.

    Returns:
        dict with two keys:
            'content': extracted page texts joined by '\\n\\n---\\n\\n'
            'urls': the URLs whose text was successfully extracted, in the
                same order as the pieces of 'content'. (Kept aligned so the
                caller can attribute each content piece to its source URL;
                previously this returned ALL candidate URLs, which
                desynchronized when a fetch failed.)
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    }

    def search_baidu(keywords):
        # Baidu result links are redirect URLs; each must be resolved to the
        # real destination before use.
        search_url = f"https://www.baidu.com/s?wd={'+'.join(keywords)}"
        try:
            # timeout added: the original call could hang indefinitely
            response = requests.get(search_url, headers=headers, timeout=5)
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'html.parser')

            search_results = []
            for link in soup.find_all('a', class_='c-showurl'):  # class Baidu uses for result URLs
                url = link.get('href', '')
                if not url.startswith('http'):
                    continue
                try:
                    # Follow the redirect to obtain the real URL.
                    real_url = requests.get(
                        url, headers=headers, timeout=3, allow_redirects=True
                    ).url
                    # Skip links that just bounce back into Baidu itself.
                    if not real_url.startswith(('https://www.baidu.com', 'http://www.baidu.com')):
                        search_results.append(real_url)
                        if len(search_results) >= 2:  # keep at most 2 Baidu results
                            break
                except requests.RequestException:
                    # Best-effort: a single bad link should not abort the search.
                    continue
            return search_results
        except requests.RequestException:
            return []

    def search_bing(keywords):
        search_url = f"https://cn.bing.com/search?q={'+'.join(keywords)}"
        try:
            # timeout added: the original call could hang indefinitely
            response = requests.get(search_url, headers=headers, timeout=5)
            soup = BeautifulSoup(response.text, 'html.parser')

            search_results = []
            for link in soup.find_all('a'):
                url = link.get('href', '')
                # Keep external links only; drop Bing's own navigation links.
                if url.startswith('http') and not url.startswith(('https://cn.bing.com', 'http://cn.bing.com')):
                    search_results.append(url)
                    if len(search_results) >= 10:  # keep at most 10 Bing results (comment previously said 2)
                        break
            return search_results
        except requests.RequestException:
            return []

    # Merge and de-duplicate the results from both engines.
    all_urls = list(set(search_baidu(keywords) + search_bing(keywords)))

    # Extract readable text from each page. fetched_urls is kept in lockstep
    # with context so that failed fetches do not desynchronize content pieces
    # from their source URLs.
    context = []
    fetched_urls = []
    for url in all_urls:
        try:
            page = requests.get(url, headers=headers, timeout=5)
            page.encoding = page.apparent_encoding  # auto-detect the page encoding
            page_soup = BeautifulSoup(page.text, 'html.parser')
            # Remove script/style tags so only visible text remains.
            for script in page_soup(["script", "style"]):
                script.decompose()
            # Collapse whitespace runs into single spaces.
            text = re.sub(r'\s+', ' ', page_soup.get_text()).strip()
            context.append(text[:1000])  # cap the text extracted per page
            fetched_urls.append(url)
        except requests.RequestException:
            continue

    return {
        'content': '\n\n---\n\n'.join(context),
        'urls': fetched_urls
    }

def _fetch_page_text(url, headers, limit=1000):
    """Fetch *url* and return its visible text, whitespace-collapsed and
    truncated to *limit* characters; return None when the fetch fails."""
    try:
        page = requests.get(url, headers=headers, timeout=5)
        page.encoding = page.apparent_encoding  # auto-detect the page encoding
        page_soup = BeautifulSoup(page.text, 'html.parser')
        # Remove script/style tags so only visible text remains.
        for script in page_soup(["script", "style"]):
            script.decompose()
        text = re.sub(r'\s+', ' ', page_soup.get_text()).strip()
        return text[:limit]
    except requests.RequestException:
        return None

def chat(message, history):
    """Handle one user turn and stream back the AI answer.

    Generator used by gr.ChatInterface:
    1. Fetches the text of any URLs the user pasted into the message.
    2. Extracts keywords (jieba) and runs a web search for extra context.
    3. Yields a "sources" header immediately, then streams the model reply.

    Args:
        message: the user's input text.
        history: prior conversation turns from Gradio (unused here; the
            prompt is built from *message* and web context only).

    Yields:
        Progressively longer strings: source list + the answer so far.
    """
    # Pull explicit URLs out of the user's message.
    urls = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message)

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    }
    # Fetch user-supplied URLs; silently skip any that fail (best-effort).
    url_contents = []
    for url in urls:
        text = _fetch_page_text(url, headers)
        if text is not None:
            url_contents.append(text)

    # Extract the top keywords and search the web with them.
    keywords = jieba.analyse.extract_tags(message, topK=3)
    search_results = search_and_extract(keywords)

    # Label content from user-provided URLs.
    url_info = [
        {'content': content, 'source': '用户提供的URL'}
        for content in url_contents
    ]

    # Label searched content with its source URL. zip() guards against
    # content/urls length mismatch: the original indexed urls[i] per split
    # piece and raised IndexError when the search returned no content
    # ('' .split() yields [''] while urls is []).
    search_info = []
    pieces = search_results['content'].split('\n\n---\n\n') if search_results['content'] else []
    for content, src_url in zip(pieces, search_results['urls']):
        search_info.append({
            'content': content,
            'source': f"搜索来源: {src_url}"
        })

    # Merge everything into a single context plus a source listing.
    all_items = url_info + search_info
    all_contents = [item['content'] for item in all_items]
    sources = [item['source'] for item in all_items]

    source_info = "\n".join(sources)
    context = '\n\n---\n\n'.join(all_contents)

    prompt = f"""用户问题:
{message}"""

    # Show the sources immediately while the answer is generated.
    yield f"信息来源:\n{source_info}\n\n正在生成回答..."

    # Stream the answer from the local Ollama model via litellm.
    stream = completion(
            model="ollama/qwen:7b",
            messages = [
                {'content': '基于从网络得到的参考信息回答用户问题。如果参考信息不相关，则直接回答用户问题。', 'role': 'system'},
                {'content': f"""           
参考信息:
{context}""", 'role': 'assistant'},
                { "content": prompt,"role": "user"}
            ],
            api_base="http://localhost:11434",
            stream=True,
    )

    resp = ''
    for chunk in stream:
        # NOTE(review): litellm usually normalizes stream chunks to the
        # OpenAI shape (chunk['choices'][0]['delta']['content']); this
        # ['message']['content'] access may depend on the litellm/ollama
        # version in use — confirm against the installed litellm.
        resp = resp + chunk['message']['content']
        yield f"信息来源:\n{source_info}\n\n{resp}"

# Build the Gradio chat UI around the `chat` generator.
iface = gr.ChatInterface(
    fn=chat,
    type="messages",
    title="AI助手",
    description="我是一个能够搜索网络信息的AI助手,请随意问我问题!",
)

# Launch only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()
