import requests
from bs4 import BeautifulSoup
from lxml import etree



# dataproxy.jsp is the CMS list endpoint; startrecord/endrecord/perpage select
# which slice of the announcement list is returned (here: first 20 items).
url = "https://www.cnipa.gov.cn/module/web/jpage/dataproxy.jsp?startrecord=1&endrecord=20&perpage=20"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0"}
data = {
    "webname": "国家知识产权局",
    "path": "https://www.cnipa.gov.cn/",
    "unitid": "485",
    "columnid": "57",
    "col": "1",
    "webid": "1",
    "sourceContentType": "1",
    "permissiontype": "0",
}


def _resolve_link(href):
    """Turn a possibly-relative href from the CMS into an absolute URL.

    Returns *href* unchanged when it is already absolute or None/empty.
    """
    if href and not href.startswith('http'):
        if href.startswith('/'):
            href = f'https://www.cnipa.gov.cn{href}'
        else:
            # NOTE(review): bare relative paths appear to be attachments under
            # /attach/0/ — confirm against real responses.
            href = f'https://www.cnipa.gov.cn/attach/0/{href}'
    return href


def parse_records(xml_text):
    """Parse the dataproxy XML response into a list of announcement dicts.

    Each <record> element wraps an HTML fragment (CDATA) that contains an
    <a> tag (title + link) and a <span> tag (publication date).  Records
    missing the link, title, or date are skipped rather than raising.

    Returns a list of {'title': str, 'link': str, 'date': str} dicts.
    """
    soup = BeautifulSoup(xml_text, 'xml')
    announcements = []
    for record in soup.find_all('record'):
        # The record body is HTML escaped inside CDATA; parse it separately.
        html_soup = BeautifulSoup(record.get_text(), 'html.parser')
        link_tag = html_soup.find('a')
        if link_tag is None:
            # Malformed/empty record — nothing to extract.
            continue
        title = link_tag.get_text(strip=True)
        href = _resolve_link(link_tag.get('href'))
        date_tag = html_soup.find('span')
        date = date_tag.get_text(strip=True) if date_tag else None
        if title and href and date:
            announcements.append({
                'title': title,
                'link': href,
                'date': date,
            })
    return announcements


def main():
    """Fetch the first page of CNIPA announcements and print a summary."""
    response = requests.post(url, headers=headers, data=data)
    result = parse_records(response.content.decode('utf-8'))

    print(f"解析完成，共提取 {len(result)} 条公告")
    for i, item in enumerate(result[:5], 1):  # show the first 5 as a sample
        print(f"{i}. {item['date']} - {item['title']}")
        print(f"   链接: {item['link']}\n")


if __name__ == "__main__":
    main()