import os

import requests
import xlwt
from bs4 import BeautifulSoup

base_url = 'http://www.cnzyao.com/'

# Pretend to be a desktop Chrome browser so the site serves normal pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
}

# One workbook for the whole crawl; each recipe category gets its own sheet.
wb = xlwt.Workbook()

# Style for data cells: wrap long recipe text inside the cell.
style = xlwt.XFStyle()
style.alignment.wrap = 1

# Style for the header row: bold coloured text, centred, solid fill.
header_font = xlwt.Font()
header_font.bold = True
header_font.height = 150          # xlwt font height is in 1/20 pt (150 = 7.5 pt)
header_font.colour_index = 0x0C

header_align = xlwt.Alignment()
header_align.horz = 0x02          # centre horizontally
header_align.vert = 0x01          # centre vertically

header_fill = xlwt.Pattern()
header_fill.pattern = xlwt.Pattern.SOLID_PATTERN
header_fill.pattern_fore_colour = 5

style1 = xlwt.XFStyle()
style1.font = header_font
style1.alignment = header_align
style1.pattern = header_fill


def parse_dl(data, result):
    """Extract one labelled recipe field from a <dl> element into *result*.

    The site renders each recipe attribute as ``<dl><dt>【label】</dt>
    <dd>value</dd></dl>``.  The Chinese label is mapped to a short ASCII
    key (e.g. '【方剂出处】' -> 'fjcc') and the <dd> text is stored under
    that key.  The recipe name itself is not handled here.

    Args:
        data: bs4 Tag for one <dl> element.
        result: dict updated in place; unknown labels leave it untouched.
    """
    switcher = {
        '【方剂出处】': 'fjcc',
        '【方剂歌诀】': 'fjgj',
        '【方解】': 'fj',
        '【配方组成】': 'pfzc',
        '【使用方法】': 'syff',
        '【功效和作用】': 'gxhzy',
        '【临床应用】': 'lcyy',
        '【注意事项】': 'zysx'
    }
    dt = data.find('dt')
    if dt is None:
        # Malformed entry: no label to match against, nothing to record.
        return
    for label, key in switcher.items():
        if label in dt.text:
            dd = data.find('dd')
            if dd is not None:  # record only when the value cell exists
                result[key] = dd.text
            break  # labels are mutually exclusive; first match wins


def parseArc(m_soup, outResult):
    """Parse one category listing page and scrape each recipe it links to.

    Args:
        m_soup: BeautifulSoup of a listing page (div.listmain layout).
        outResult: list extended in place with one dict per recipe
            ('title' plus the keys produced by parse_dl).
    """
    try:
        # Drill down to the <li> items of the article list.
        all_arc = (m_soup.find('div', class_='listmain')
                         .find('div', class_='left')
                         .find('div', class_='arclist_area')
                         .find('ul', class_='arclist_ul')
                         .find_all('li'))
    except Exception as e:
        # Layout did not match expectations; skip this page, but report it
        # instead of silently swallowing the error as before.
        print(f'列表页解析失败: {e}')
        return
    # Collect detail-page URLs, de-duplicated but in page order.  A single
    # malformed <li> no longer aborts the whole page.
    seen = set()
    detail_urls = []
    for arc in all_arc:
        p = arc.find('p')
        link = p.find('a', class_='name_cla') if p is not None else None
        href = link.get('href') if link is not None else None
        if href and href not in seen:
            seen.add(href)
            detail_urls.append(href)
    for url in detail_urls:
        try:
            response = requests.get(url, headers=headers)
        except requests.RequestException as e:
            print(f'请求失败 {url}: {e}')
            continue
        soup = BeautifulSoup(response.content, 'html.parser')
        try:
            arc_on = (soup.find('div', class_='arcmain')
                          .find('div', class_='left')
                          .find('div', class_='arcon'))
            list_item = {'title': arc_on.find('h1').text}
            for dl in arc_on.find('div', class_='arcbody').find_all('dl'):
                parse_dl(dl, list_item)
            outResult.append(list_item)
            print(list_item)
        except Exception as e:
            # A malformed detail page should not abort the whole category.
            print(f'详情页解析失败 {url}: {e}')


def start():
    """Crawl every recipe category on cnzyao.com into one Excel workbook.

    Each category becomes its own worksheet with a styled header row;
    the workbook is saved to ./爬取的数据/中医药网.xls (the directory is
    created if missing — xlwt will not create it for us).
    """
    response = requests.get(base_url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    # Category links live in the second <ul> of the first nav box.
    typeLabels = soup.find('div', class_='navmenu').findAll('div', class_='navbox')[0].findAll('ul')[1].findAll('li')
    types = []
    totalSize = 0
    for label in typeLabels:
        a_label = label.find('a')
        # '中药方剂' is the umbrella "all recipes" entry; skip it.
        if a_label.text != '中药方剂':
            types.append({
                'text': a_label.text,
                'href': a_label['href']
            })
    # Column captions and the matching dict keys produced by parse_dl,
    # in the same order.
    header_titles = ['方剂名称', '方剂出处', '方剂歌诀', '方解', '配方组成',
                     '使用方法', '功效和作用', '临床应用', '注意事项']
    row_keys = ['title', 'fjcc', 'fjgj', 'fj', 'pfzc',
                'syff', 'gxhzy', 'lcyy', 'zysx']
    # Reverse iteration preserved from the original (types[::-1]).
    for category in reversed(types):
        print(f'开始爬取【{category["text"]}】')
        sh = wb.add_sheet(category['text'], cell_overwrite_ok=True)
        for col, title in enumerate(header_titles):
            sh.write(0, col, title, style1)
            sh.col(col).width = 256 * 40  # ~40 characters wide
        # Hrefs are root-relative; collapse the double slash produced by
        # naive concatenation, then restore the scheme's '//'.
        url = (base_url + category['href']).replace("//", '/').replace('http:/', 'http://')
        response1 = requests.get(url, headers=headers)
        print(url)
        soup1 = BeautifulSoup(response1.content, 'html.parser')
        myResult = []
        # Pagination links live in the first <ul class="paging">.
        all_li = (soup1.find('div', class_='listmain')
                       .find('div', class_='left')
                       .find('div', class_='arclist_area')
                       .findAll('ul', class_='paging')[0]
                       .findAll('li'))
        page_urls = []
        for li in all_li:
            a = li.find('a')
            # The "current page" marker has no <a>; .get also covers a
            # link with no href attribute.
            href = a.get('href') if a is not None else None
            if href and href not in page_urls:
                page_urls.append(href)
        # First page, then every other page of the category.
        parseArc(soup1, myResult)
        for href in page_urls:
            response2 = requests.get(href, headers=headers)
            parseArc(BeautifulSoup(response2.content, 'html.parser'), myResult)
        # One row per recipe; a missing field becomes a blank cell instead
        # of truncating the rest of the row with a KeyError.
        for row, data in enumerate(myResult, start=1):
            try:
                for col, key in enumerate(row_keys):
                    sh.write(row, col, data.get(key, ''), style)
            except Exception as e:
                print(f"发生异常{e}")
        totalSize += len(myResult)
    # xlwt cannot create missing directories; ensure the target exists.
    os.makedirs('./爬取的数据', exist_ok=True)
    wb.save('./爬取的数据/中医药网.xls')
    print(f'处理完毕,共{totalSize}条方剂')


if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    start()