
import os
import re

import requests
from bs4 import BeautifulSoup

def getContext(url):
    """Fetch *url* and return its parsed HTML, or None on a non-200 response.

    Sends a GET request with a browser-like User-Agent (the target site
    appears to reject default client UAs) and parses the body with
    BeautifulSoup's built-in 'html.parser'.

    :param url: absolute URL of the page to fetch
    :return: BeautifulSoup document on success, None otherwise
    """
    # Browser-like headers so the site serves the normal page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
    }

    # timeout prevents the crawler from hanging forever on a stalled server.
    response = requests.get(url, headers=headers, timeout=30)

    if response.status_code == 200:
        # 'html.parser' is the stdlib parser; 'lxml' would also work here.
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup
    else:
        print(f"Failed to retrieve page with status code: {response.status_code}")
        return None

def getOneData(url, successFile, errorFile):
    """Scrape one drug article page and save its cleaned text to data/<title>.txt.

    The URL is appended to *successFile* on success, or to *errorFile* when
    the page cannot be fetched or its body does not match the expected
    article layout.

    :param url: absolute URL of the article page
    :param successFile: open writable handle for successfully scraped URLs
    :param errorFile: open writable handle for failed URLs
    """
    print(url)
    soup = getContext(url)
    if soup is None:
        # Request failed; record and skip instead of crashing on None.
        errorFile.write(url + '\n')
        return

    # The article body lives in the #bodyContent container.
    bodyContent = soup.select_one('#bodyContent')
    if bodyContent is None:
        errorFile.write(url + '\n')
        print("No match found")
        return
    content = bodyContent.get_text()

    # Preferred layout: title after the breadcrumb ">>", intro before "目录",
    # then the "适应证" section up to "参看".
    pattern = r'[\s\S]*A\+医学百科.*>>(.*)\n([\s\S]*)目录[\s\S]*(适应证[\s\S]*)参看[\s\S]*'
    match = re.search(pattern, content)

    if match:
        content = match.group(1) + match.group(2) + match.group(3)
    else:
        # Fallback layout: pages without a "目录"/"适应证" structure.
        pattern = r'[\s\S]*A\+医学百科.*>>(.*)\n([\s\S]*)参看[\s\S]*'
        match = re.search(pattern, content)
        if match:
            content = match.group(1) + match.group(2)
        else:
            # Neither layout matched; record the URL for later inspection.
            errorFile.write(url + '\n')
            print("No match found")
            return

    # Sanitize the title for use as a filename: replace all characters that
    # are invalid on Windows (\ / : * ? " < > |), not just slashes.
    filename = re.sub(r'[\\/:*?"<>|]', '-', match.group(1))
    # Drop all spaces (the text is Chinese; spaces are layout artifacts).
    content = re.sub(' ', '', content)
    # Collapse any run of consecutive newlines into one.  The previous
    # non-overlapping '\n\n' -> '\n' substitution left residual blank lines
    # whenever three or more newlines appeared in a row.
    content = re.sub('\n{2,}', '\n', content)
    # Trim leading/trailing whitespace.
    content = content.strip()
    # Record the successfully scraped URL.
    successFile.write(url + '\n')

    # Ensure the output directory exists before writing.
    os.makedirs('data', exist_ok=True)
    with open('data/' + filename + '.txt', 'w', encoding='utf-8') as f:
        f.write(content)

def getData(url):
    """Crawl a drug-list page and scrape every linked drug article.

    Extracts the titles between "药物列表" and "参看" on the list page,
    follows each title's link, and delegates per-page scraping to
    getOneData().  Successful and failed article URLs are logged to
    dualdata/successFile.txt and dualdata/errorFile.txt respectively.

    :param url: absolute URL of the drug-list page
    """
    # Ensure the log directory exists before opening the log files.
    os.makedirs('dualdata', exist_ok=True)
    with open('dualdata/successFile.txt', 'w', encoding='utf-8') as successFile:
        with open('dualdata/errorFile.txt', 'w', encoding='utf-8') as errorFile:
            soup = getContext(url)
            if soup is None:
                # List page could not be fetched; nothing to crawl.
                return
            bodyContent = soup.select_one('#bodyContent')
            if bodyContent is None:
                return
            content = bodyContent.get_text()

            # Titles sit between the "药物列表" heading and the "参看" section.
            pattern = r'[\s\S]*药物列表\n([\s\S]*)参看\n[\s\S]*'
            match = re.search(pattern, content)
            if match is None:
                # Page layout changed; avoid AttributeError on .group().
                print("No match found")
                return

            titles = match.group(1).split('\n')
            for title in titles:
                # Skip blank/one-character artifacts from the split.
                if len(title) < 2:
                    continue
                # Find the anchor whose title attribute matches the text.
                link = soup.find('a', title=title)
                if link:
                    getOneData("http://www.a-hospital.com" + link.get('href'), successFile, errorFile)
    

if __name__ == "__main__":
    # getData("http://www.a-hospital.com/w/%E9%9D%92%E9%9C%89%E7%B4%A0G")
    getData("http://www.a-hospital.com/w/%E9%9D%92%E9%9C%89%E7%B4%A0%E7%B1%BB%E8%8D%AF%E7%89%A9")