import os
import time

import requests

import data_parse

BASE_URL = 'https://baike.lingyaai.cn/'
BASE_URL_DETAIL = 'https://baike.lingyaai.cn{}'


def save_lingya_index():
    """
    Fetch the site index page and save it to 'lingya_index.html'.

    :return: None
    """
    # Explicit timeout: requests has no default and would otherwise
    # hang forever on an unresponsive server.
    response = requests.get(BASE_URL, timeout=10)
    if response.status_code == 200:
        html = response.text
        # Re-encode as UTF-8 regardless of the server-declared charset so
        # the local copy can always be re-opened with encoding='utf-8'.
        with open('lingya_index.html', 'wb') as html_file:
            html_file.write(html.encode(encoding='utf-8'))


def save_detail_page(suffix: str):
    """
    Download one detail page and save it locally, mirroring the URL path.

    :param suffix: URL path suffix (e.g. '/details/x.html'); the page is
        written to '.' + suffix, creating parent directories as needed.
    :return: None
    """
    # Throttle requests to reduce load on the target site.
    time.sleep(0.2)
    abs_url = BASE_URL_DETAIL.format(suffix)
    print(abs_url)
    # Explicit timeout: requests has no default and would otherwise hang.
    response = requests.get(abs_url, timeout=10)
    if response.status_code == 200:
        html = response.text
        local_path = '.{}'.format(suffix)
        # Bug fix: open() raised FileNotFoundError when the suffix contained
        # directory components that did not yet exist locally.
        parent_dir = os.path.dirname(local_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        with open(local_path, 'wb') as html_file:
            html_file.write(html.encode(encoding='utf-8'))

def get_suffix(html: str):
    """
    Parse the index page and download the detail page of every sub-category
    belonging to the first seven tea categories.

    :param html: raw HTML text of the index page
    :return: None
    """
    tea_categories = data_parse.lingya_index_parse(html)
    # Only the first 7 tea categories are crawled.
    for idx in range(7):
        anchors = tea_categories[idx][4].xpath('./a')
        # Each <a> element carries the URL suffix of one sub-category page.
        for anchor in anchors:
            save_detail_page(suffix=anchor.xpath('./@href')[0])


# 1. Initial crawl: save the index page locally first; parsing then reads the
#    local copy, reducing the load on the target site.
# save_lingya_index()

# 2. Detail-page crawl: fetch the detail pages and save them locally so that
#    later runs can read them from disk instead of re-downloading.
if __name__ == '__main__':
    # Guard prevents the crawl from firing as a side effect of importing
    # this module (the original ran unconditionally at import time).
    with open('lingya_index.html', 'r', encoding='utf-8') as html_file:
        html = html_file.read()
    get_suffix(html)