import aiohttp
import asyncio
from bs4 import BeautifulSoup
from typing import List, Dict, Any
import re
from datetime import datetime
import logging

# Configure module-level logging; errors during scraping are logged, not raised.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class BaiduSearch:
    """Scrape Baidu web-search results for academic papers and supplier prices.

    Results are parsed directly from the Baidu HTML result page; ads and
    promoted links are filtered out heuristically. All network errors are
    logged and swallowed, so callers always receive a (possibly empty) list.
    """

    def __init__(self):
        self.base_url = "https://www.baidu.com/s"
        # Desktop-browser headers reduce the chance of Baidu serving a
        # bot-detection or mobile page instead of the normal result markup.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        }

    async def search_papers(self, query: str, max_results: int = 100) -> List[Dict[str, Any]]:
        """Search for academic papers about *query*.

        The query is restricted to well-known scholarly sites via Baidu's
        ``site:`` operator. Returns a list of result dicts (see
        :meth:`_parse_baidu_results`).
        """
        search_query = f"{query} 论文 site:scholar.google.com OR site:arxiv.org OR site:researchgate.net OR site:academia.edu"
        return await self._search_baidu(search_query, max_results, "paper")

    async def search_prices(self, query: str, max_results: int = 50) -> List[Dict[str, Any]]:
        """Search for price / supplier information about *query*.

        Restricted to common Chinese B2B / e-commerce sites; each result dict
        additionally carries ``price``/``currency``/``supplier``/``quantity``.
        """
        search_query = f"{query} 价格 供应商 采购 site:alibaba.com OR site:made-in-china.com OR site:1688.com OR site:taobao.com"
        return await self._search_baidu(search_query, max_results, "price")

    async def _search_baidu(self, query: str, max_results: int, search_type: str) -> List[Dict[str, Any]]:
        """Fetch one Baidu result page for *query* and parse it.

        Only a single page is requested, so at most 50 results can come back
        from Baidu regardless of *max_results*. Errors are logged and an
        empty list is returned; this method never raises.
        """
        results: List[Dict[str, Any]] = []
        try:
            # Bound the whole request so a stalled connection cannot hang
            # the coroutine indefinitely.
            timeout = aiohttp.ClientTimeout(total=30)
            async with aiohttp.ClientSession(timeout=timeout) as session:
                params = {
                    'wd': query,
                    'rn': min(max_results, 50),  # Baidu serves at most 50 results per page
                    'ie': 'utf-8',
                    'oe': 'utf-8'
                }

                async with session.get(self.base_url, params=params, headers=self.headers) as response:
                    if response.status == 200:
                        html = await response.text()
                        # BUGFIX: max_results was previously ignored by the
                        # parser (hard-coded cap of 100); thread it through so
                        # callers actually get at most what they asked for.
                        results = self._parse_baidu_results(html, search_type, max_results)
                    else:
                        logger.error(f"百度搜索失败，状态码: {response.status}")
        except Exception as e:
            # Best-effort scraper: log and fall through to the empty list.
            logger.error(f"搜索过程中发生错误: {str(e)}")

        return results

    def _parse_baidu_results(self, html: str, search_type: str,
                             max_results: int = 100) -> List[Dict[str, Any]]:
        """Parse a Baidu result page into result dicts, skipping ads.

        Each dict has ``title``, ``url``, ``abstract``, ``timestamp`` and
        ``source``; price searches add the fields from
        :meth:`_extract_price_info`. At most *max_results* entries are
        returned (default 100 keeps the old behavior for direct callers).
        """
        soup = BeautifulSoup(html, 'html.parser')
        results: List[Dict[str, Any]] = []

        # Organic results live in div.result containers.
        for container in soup.find_all('div', class_='result'):
            # Skip ads: Baidu ads typically carry a badge icon...
            if container.find('span', class_='c-icon-badge'):
                continue
            # ...or explicit promotion wording in the snippet text.
            if '商业推广' in container.text or '广告' in container.text:
                continue

            title_elem = container.find('h3')
            if not title_elem:
                continue

            link_elem = title_elem.find('a')
            if not link_elem:
                continue

            title = link_elem.get_text(strip=True)
            url = link_elem.get('href', '')

            # Snippet / abstract, when present.
            abstract_elem = container.find('div', class_='c-abstract')
            abstract = abstract_elem.get_text(strip=True) if abstract_elem else ''

            result_data = {
                'title': title,
                'url': url,
                'abstract': abstract,
                # Fall back to "now" when no time string could be extracted.
                'timestamp': self._extract_time_info(container) or datetime.now().isoformat(),
                'source': self._extract_source(url)
            }

            # Price searches get extra extracted fields.
            if search_type == "price":
                result_data.update(self._extract_price_info(title, abstract))

            results.append(result_data)

            if len(results) >= max_results:  # honour the caller's limit
                break

        # NOTE(review): timestamps mix formats ("2023年5月1日", ISO dates,
        # "3天前"), so this lexicographic sort is only approximate.
        results.sort(key=lambda x: x['timestamp'], reverse=True)
        return results

    def _extract_time_info(self, container) -> str:
        """Extract a publication-time string from a result container.

        Returns the first Chinese/ISO date or relative-time phrase found in
        the container's text, or ``''`` when none matches (the caller
        supplies the fallback timestamp).
        """
        # Common time formats seen in Baidu result snippets.
        time_patterns = [
            r'(\d{4}年\d{1,2}月\d{1,2}日)',
            r'(\d{4}-\d{1,2}-\d{1,2})',
            r'(\d{1,2}天前)',
            r'(\d{1,2}小时前)',
            r'(\d{1,2}分钟前)'
        ]

        container_text = container.get_text()
        for pattern in time_patterns:
            match = re.search(pattern, container_text)
            if match:
                return match.group(1)

        return ''

    def _extract_source(self, url: str) -> str:
        """Return the host (netloc) part of *url*, e.g. ``www.alibaba.com``."""
        from urllib.parse import urlparse
        return urlparse(url).netloc

    def _extract_price_info(self, title: str, abstract: str) -> Dict[str, Any]:
        """Heuristically extract price, supplier and quantity from text.

        Scans ``title + abstract`` with simple regexes; any field that
        cannot be found stays ``None``. Currency is always reported as CNY
        because only RMB price patterns are matched.
        """
        price_info: Dict[str, Any] = {
            'price': None,
            'currency': 'CNY',
            'supplier': None,
            'quantity': None
        }

        # RMB price patterns (half-width ¥, full-width ￥, and unit suffixes).
        price_patterns = [
            r'¥\s*([\d,]+(?:\.\d{2})?)',
            r'￥\s*([\d,]+(?:\.\d{2})?)',
            r'(\d+(?:\.\d{2})?)\s*元',
            r'(\d+(?:\.\d{2})?)\s*RMB',
            r'(\d+(?:\.\d{2})?)\s*人民币'
        ]

        text = f"{title} {abstract}"

        # First matching pattern wins; thousands separators are stripped.
        for pattern in price_patterns:
            match = re.search(pattern, text)
            if match:
                price_info['price'] = match.group(1).replace(',', '')
                break

        # Supplier: first whitespace-separated token containing a company-ish
        # keyword. Crude, but Baidu snippets often space-separate fields.
        supplier_keywords = ['公司', '厂家', '供应商', '生产商', '制造商', '企业']
        for word in text.split():
            if any(keyword in word for keyword in supplier_keywords):
                price_info['supplier'] = word
                break

        # Quantity: number followed by a weight/volume unit.
        quantity_patterns = [
            r'(\d+)\s*克',
            r'(\d+)\s*千克',
            r'(\d+)\s*公斤',
            r'(\d+)\s*吨',
            r'(\d+)\s*毫升',
            r'(\d+)\s*升'
        ]

        for pattern in quantity_patterns:
            match = re.search(pattern, text)
            if match:
                price_info['quantity'] = match.group(1)
                break

        return price_info

# Module-level singleton for convenient importing elsewhere.
baidu_searcher = BaiduSearch()

# 异步测试函数
async def test_search():
    searcher = BaiduSearch()
    
    # 测试论文搜索
    papers = await searcher.search_papers("二氧化硅", 10)
    print("论文搜索结果:")
    for paper in papers:
        print(f"标题: {paper['title']}")
        print(f"链接: {paper['url']}")
        print(f"时间: {paper['timestamp']}")
        print("---")
    
    # 测试价格搜索
    prices = await searcher.search_prices("二氧化硅", 10)
    print("价格搜索结果:")
    for price in prices:
        print(f"标题: {price['title']}")
        print(f"价格: {price.get('price', 'N/A')}")
        print(f"供应商: {price.get('supplier', 'N/A')}")
        print("---")

if __name__ == "__main__":
    # Run the live smoke test when executed as a script.
    asyncio.run(test_search())