import logging
import re

from bs4 import BeautifulSoup

from web import aget

# Module logger keyed on the dotted import path. Using __file__ here (the
# previous code) names the logger after the absolute file path, which breaks
# hierarchical logging configuration; __name__ is the standard choice.
logger = logging.getLogger(__name__)

async def baidu_search(keyword, pageNum=0, pageSize=10, tn=None):
    """Query Baidu web search and scrape the result page.

    Returns a list of per-result dicts (see _parse_tab / _parse_div for the
    shape), or None when the HTTP request fails or neither known page
    layout is recognized.
    """
    response = await aget("https://www.baidu.com/s", wd=keyword, pn=pageNum,
                          rn=pageSize, ie='utf-8', tn=tn or 'baidurt')
    if not response.is_success:
        return None

    page = BeautifulSoup(response.text, "html.parser")

    # Table-based markup: hits are <table> elements under a 'content' node
    # (presumably the lightweight 'baidurt' template — verify against live HTML).
    table_root = page.find(attrs={'class': 'content'})
    if table_root:
        return [_parse_tab(hit) for hit in table_root.find_all('table')]

    # Div-based markup: the presence of 'content_left' marks this layout;
    # hits are then collected from the whole document by their result class.
    if page.find(attrs={'id': 'content_left'}):
        hits = page.find_all(attrs={'class': 'result c-container xpath-log new-pmd'})
        return [_parse_div(hit) for hit in hits]

    return None

def normalize_consecutive_chars(s):
    """Collapse each run of spaces, tabs, or newlines in *s* to one character.

    Every maximal run matching ``[ \\n\\t]+`` is replaced by the run's first
    character, so mixed runs keep only their leading whitespace character
    (e.g. ``"a  b\\n\\nc"`` -> ``"a b\\nc"``).
    """
    # `re` is now imported at module level instead of on every call;
    # the compiled pattern is cached internally by the re module.
    return re.sub(r'[ \n\t]+', lambda match: match.group(0)[0], s)

def _parse_tab(tab):
    """Extract one search hit from a <table>-style Baidu result.

    Returns a dict with 'title', 'link', 'date' and 'brief' keys.
    NOTE(review): assumes the <h3 class="t"><a> anchor and the
    <font size=-1> brief node are always present — a missing node raises
    AttributeError. Also, the integer -1 passed for the 'size' attribute
    looks suspect (HTML attribute values are strings) — confirm it matches
    against real markup.
    """
    anchor = tab.find('h3', attrs={'class': 't'}).find('a')
    timestamp = tab.find('div', attrs={'class': 'realtime'})
    brief_text = tab.find('font', attrs={'size': -1}).text.strip()
    return {
        'title': anchor.text.strip(),
        'link': anchor.attrs['href'],
        'date': timestamp.text.strip() if timestamp else '',
        'brief': normalize_consecutive_chars(brief_text),
    }

def _parse_div(div):
    """Extract one search hit from a div-style Baidu result.

    Returns a dict with 'title', 'link', 'date' and 'brief' keys — the same
    shape as _parse_tab, so callers of baidu_search see a uniform record.
    """
    container = div.find('div', attrs={'class': 'c-container'})
    # The brief text lives in one of two generated class names; try both.
    content = (container.find('span', attrs={'class': "content-right_2s-H4"})
               or container.find('span', attrs={'class': 'content-right_1THTn'}))
    title = container.find('h3', attrs={'class': 'c-title t t tts-title'})
    # The date node, when present, immediately precedes the brief span
    # — presumably a timestamp span; verify against live markup.
    d = content.previous_sibling
    return {
        'title': title.text.strip(),
        'link': title.find('a').attrs['href'],
        # '' (not None) when the date node is absent, matching _parse_tab.
        'date': d.text.strip() if d else '',
        # Key fixed: was misspelled 'breif', inconsistent with _parse_tab.
        'brief': normalize_consecutive_chars(content.text.strip()),
    }
