# Automatically submit a keyword to a search engine and retrieve the query results
import urllib.request
import urllib.parse
import urllib.error
from html.parser import HTMLParser
import time


class SearchParser(HTMLParser):
    """Extract result titles and links from a search-results HTML page.

    A result is an ``<h2>`` tag carrying a ``class`` attribute (Bing marks
    result headings this way) whose inner ``<a>`` tag has an absolute
    http(s) href. Completed results accumulate in ``self.resultList`` as
    ``{'title': ..., 'url': ...}`` dicts.
    """

    def __init__(self):
        super().__init__()
        self.inTitle = False    # True while inside a result <h2>
        self.currentTitle = ""  # accumulated text of the current heading
        self.currentUrl = ""    # href of the link found inside the heading
        self.resultList = []    # finished {'title', 'url'} results

    def handle_starttag(self, tag, attrs):
        if tag == 'h2':
            # Only class-bearing <h2> tags are treated as result headings.
            for name, value in attrs:
                if name == 'class':
                    self.inTitle = True
        elif tag == 'a' and self.inTitle:
            # Fix: capture hrefs only while inside a result heading.
            # Previously any absolute link anywhere on the page leaked into
            # currentUrl and could be paired with an unrelated title.
            for name, value in attrs:
                if name == 'href' and value.startswith('http'):
                    self.currentUrl = value

    def handle_data(self, data):
        # Heading text may arrive in several chunks; accumulate it.
        if self.inTitle:
            self.currentTitle += data

    def handle_endtag(self, tag):
        if tag == 'h2' and self.inTitle:
            self.inTitle = False
            # Keep the result only when both title text and a URL were found.
            if self.currentTitle.strip() and self.currentUrl:
                self.resultList.append({
                    'title': self.currentTitle.strip(),
                    'url': self.currentUrl
                })
            self.currentTitle = ""
            self.currentUrl = ""


class SearchQuery:
    """Submit a keyword query to Bing and return parsed results.

    Degrades gracefully: on any network or parse failure, placeholder
    results are returned instead of raising.
    """

    def __init__(self):
        # Desktop-browser UA so the search engine serves a normal HTML page.
        self.userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        self.timeout = 5  # HTTP request timeout, in seconds

    def getSearchResults(self, query, maxResults=5):
        """Fetch up to ``maxResults`` results for ``query``.

        Returns a list of {'title': str, 'url': str} dicts; never raises —
        failures fall back to :meth:`getFallbackResults`.
        """
        print("开始搜索...")

        searchUrl = "https://www.bing.com/search"
        # urlencode handles percent-quoting of non-ASCII query text.
        params = urllib.parse.urlencode({'q': query, 'count': maxResults})
        fullUrl = f"{searchUrl}?{params}"

        try:
            print(f"请求URL: {fullUrl}")
            req = urllib.request.Request(fullUrl, headers={'User-Agent': self.userAgent})
            # Fix: close the response via a context manager (previously the
            # connection leaked); errors='replace' avoids a crash on stray
            # non-UTF-8 bytes in the response body.
            with urllib.request.urlopen(req, timeout=self.timeout) as response:
                htmlContent = response.read().decode('utf-8', errors='replace')
            print("收到响应，开始解析...")

            parser = SearchParser()
            parser.feed(htmlContent)
            print(f"解析到 {len(parser.resultList)} 个结果")

            if parser.resultList:
                return parser.resultList[:maxResults]
            else:
                print("没有解析到结果，返回示例数据")
                return self.getFallbackResults(query)

        except urllib.error.URLError as e:
            print(f"网络连接错误: {e}")
            return self.getFallbackResults(query)
        except Exception as e:
            # Deliberate best-effort: any other failure also degrades to
            # sample data rather than crashing the caller.
            print(f"搜索错误: {e}")
            return self.getFallbackResults(query)

    def getFallbackResults(self, query):
        """Placeholder results used when the live search fails."""
        return [
            {'title': f'{query} - 示例结果1', 'url': 'https://example.com/1'},
            {'title': f'{query} - 示例结果2', 'url': 'https://example.com/2'},
            {'title': f'{query} - 示例结果3', 'url': 'https://example.com/3'}
        ]


if __name__ == "__main__":
    # Run a demo query and pretty-print whatever comes back.
    searchTool = SearchQuery()
    results = searchTool.getSearchResults('Python教程')

    print("\n=== 搜索结果 ===")
    if not results:
        print("没有找到任何结果")
    else:
        for index, entry in enumerate(results, start=1):
            print(f"{index}. {entry['title']}")
            print(f"   链接: {entry['url']}\n")