import os

import requests
import xlwt
from bs4 import BeautifulSoup

base_url = 'https://www.zhongyifangji.com'
# Browser-like request headers so the site serves the normal HTML pages.
# NOTE(review): the Cookie below is a hard-coded PHP session captured from a
# browser — it will expire; refresh it if requests start getting rejected.
headers = {
    'Accept-Encoding': 'gzip, deflate, br',
    # fixed: was misspelled 'Accept_Language' (underscore), which browsers
    # and servers would not recognise as the standard Accept-Language header
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'Cache-Control': 'no-cache',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Cookie': 'PHPSESSID=qsj6gt6m2bu04gm0cc177flhl4; __51uvsct__K1ChDRXFMCxBDlim=1; __51vcke__K1ChDRXFMCxBDlim=93a91f9f-70ff-5a85-b5dc-cab2738bc1f2; __51vuft__K1ChDRXFMCxBDlim=1695695132834; __vtins__K1ChDRXFMCxBDlim=%7B%22sid%22%3A%20%2242025c59-4eab-5acd-8fe1-beedccebbef4%22%2C%20%22vd%22%3A%2018%2C%20%22stt%22%3A%201392471%2C%20%22dr%22%3A%20244070%2C%20%22expires%22%3A%201695698325303%2C%20%22ct%22%3A%201695696525303%7D',
    'Sec-Ch-Ua-Platform': 'Windows',
    'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    # fixed: was 'save-orgin', an invalid value; the real header value
    # browsers send for same-site navigation is 'same-origin'
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1'
    # dropped the bogus 'scheme': 'https' entry — ':scheme' is an HTTP/2
    # pseudo-header that must not be sent as a regular header field
}
wb = xlwt.Workbook()

# Style for data cells: wrap long text inside the cell.
style = xlwt.XFStyle()
style.alignment.wrap = 1

# Style for the header row: bold coloured text, centred, with a solid fill.
header_font = xlwt.Font()
header_font.bold = True
header_font.height = 150
header_font.colour_index = 0x0C

header_align = xlwt.Alignment()
header_align.horz = 0x02  # horizontally centred
header_align.vert = 0x01  # vertically centred

header_fill = xlwt.Pattern()
# May be: NO_PATTERN, SOLID_PATTERN, or 0x00 through 0x12
header_fill.pattern = xlwt.Pattern.SOLID_PATTERN
header_fill.pattern_fore_colour = 5

style1 = xlwt.XFStyle()
style1.alignment = header_align
style1.font = header_font
style1.pattern = header_fill

def parse_dl(data, result):
    """Extract one labelled detail row into *result*.

    data: an element exposing bs4-style ``find``; expected to contain a
        ``<strong class="col-1">`` label and a ``<div class="col-10">`` value.
    result: dict updated in place — the Chinese label is mapped to a short
        pinyin key (see ``switcher``). Rows with an unknown label, or rows
        missing either node, are silently skipped.
    """
    # Chinese field label -> short key used in the result dict / spreadsheet
    switcher = {
        '名称': 'mc',
        '分类': 'fl',
        '组成': 'zc',
        '用法': 'yf',
        '功用': 'gy',
        '主治': 'zz',
        '病机': 'bj',
        '运用': 'yy',
        '附方': 'ff',
        '方歌': 'fg',
        '出处': 'cc'
    }
    # Hoisted out of the loop (the original re-ran find() per key) and
    # guarded: a malformed row used to raise AttributeError on None.
    label = data.find('strong', class_='col-1')
    value = data.find('div', class_='col-10')
    if label is None or value is None:
        return
    for key, short in switcher.items():
        if key in label.text:
            result[short] = value.text
            break  # each row carries exactly one label


def parseArc(container, outResult):
    """Collect every prescription on one listing page.

    container: bs4 element for the page's second ``div.container``.
    outResult: list extended in place — one dict per prescription, keyed by
        the short codes produced by ``parse_dl`` plus ``'url'``; ``'cc'`` and
        ``'ff'`` default to '' because not every prescription has them.
    """
    # Collect the distinct detail-page links from the card grid.
    grid = container.find(
        'div', class_='row row-cols-1 row-cols-sm-2 row-cols-md-4 g-3')
    all_arc_url = []
    for col in grid.findAll('div', class_='col'):
        try:
            a = col.find('div', class_='card shadow-sm border-0').find('a')
            href = a['href']
        # Best-effort: cards without a card div / anchor / href are skipped.
        # Narrowed from a bare `except Exception: pass` so real bugs surface.
        except (AttributeError, TypeError, KeyError):
            continue
        if href not in all_arc_url:
            all_arc_url.append(href)

    for url in all_arc_url:
        # timeout added: a stalled connection would otherwise hang forever
        response = requests.get(f'{base_url}{url}', headers=headers, timeout=30)
        soup = BeautifulSoup(response.content, 'html.parser')
        # The original duplicated this 5-step selector chain twice; the two
        # findAll calls below differ only in the row class they match.
        small = soup.findAll('div', class_='container')[1].find(
            'div', class_='p-4 px-5 bg-body rounded shadow-sm').find(
            'div', class_='small')
        item = {
            'cc': '',
            'ff': '',
            'url': f'{base_url}{url}'
        }
        for info in small.findAll('div', class_='border-bottom border-light row py-3'):
            parse_dl(info, item)
        for info in small.findAll('div', class_='row py-3'):
            parse_dl(info, item)
        outResult.append(item)


def start():
    """Crawl every prescription category into its own worksheet and save
    the workbook to ./爬取的数据/中医方剂网.xls, printing a summary count."""
    # Column title shown in the sheet header -> key in the parsed item dict.
    # Drives both the header row and the data rows, replacing 22 copy-pasted
    # sh.write / sh.col lines per sheet in the original.
    columns = [
        ('方剂名称', 'mc'), ('出处', 'cc'), ('分类', 'fl'), ('配方组成', 'zc'),
        ('用法', 'yf'), ('功用', 'gy'), ('主治', 'zz'), ('方歌', 'fg'),
        ('病机', 'bj'), ('运用', 'yy'), ('附方', 'ff'),
    ]

    # Fetch the index page and pull out the category links (skipping '全部',
    # the "all" pseudo-category, which would duplicate every entry).
    response = requests.get(f'{base_url}/home/prescription/index',
                            headers=headers, timeout=30)
    soup = BeautifulSoup(response.content, 'html.parser')
    list_a = soup.findAll('div', class_='container')[1].find(
        'div', class_='p-4 px-5 bg-body rounded shadow-sm mb-5 small').findAll(
        'div', class_='py-2 row')[1].find('div', class_='col').findAll('a')
    all_type = [
        {'text': a.text.replace('\t', '').replace('\n', ''), 'href': a['href']}
        for a in list_a if a.text != '全部'
    ]

    totalSize = 0
    for m_type in all_type:
        sh = wb.add_sheet(m_type['text'], cell_overwrite_ok=True)
        for idx, (title, _) in enumerate(columns):
            sh.write(0, idx, title, style1)
            sh.col(idx).width = 256 * 40

        response1 = requests.get(f"{base_url}{m_type['href']}",
                                 headers=headers, timeout=30)
        soup1 = BeautifulSoup(response1.content, 'html.parser')
        myResult = []
        container = soup1.findAll('div', class_='container')[1]
        parseArc(container, myResult)

        # Follow pagination, if the pager element exists and is non-empty.
        pages = container.find('div', class_='mt-5 mb-3 text-center page')
        if pages is not None and pages.text.replace('\n', '') != '':
            hrefs = []
            for a in pages.findAll('a', class_='bg-white shadow-sm px-3 py-2 rounded mx-1'):
                if a['href'] not in hrefs:
                    hrefs.append(a['href'])
            for href in hrefs:
                response2 = requests.get(f"{base_url}{href}",
                                         headers=headers, timeout=30)
                soup2 = BeautifulSoup(response2.content, 'html.parser')
                parseArc(soup2.findAll('div', class_='container')[1], myResult)

        for row, data in enumerate(myResult, start=1):
            print(data)
            for idx, (_, key) in enumerate(columns):
                try:
                    # .get with '' default: a missing field no longer aborts
                    # the remaining cells of the row with a KeyError
                    sh.write(row, idx, data.get(key, ''), style)
                except Exception as e:
                    print(f"发生异常{e}")
        totalSize += len(myResult)

    # wb.save used to crash if the output directory did not exist yet.
    os.makedirs('./爬取的数据', exist_ok=True)
    wb.save('./爬取的数据/中医方剂网.xls')
    print(f'处理完毕,共{totalSize}条方剂')


# Guarded entry point: the crawl only runs when executed as a script,
# so the module can be imported (e.g. for testing) without side effects.
if __name__ == '__main__':
    start()