import requests
import re
import csv
from bs4 import BeautifulSoup
import time

# Shared mutable state for the whole script.
urls = []      # URLs to scrape, loaded from urls.txt
errorLog = []  # human-readable log entries collected during the run
"""
以下各级写在第一个的为对应列标题
想必你也能想到，如果想要一个自定义的名字你就只需要在第一个添加一个列标，即使它并不会爬取到
"""
# (Translation of the note above: for each column index, the FIRST tag listed
# becomes the column title.  To use a custom title for a column, add it as the
# first entry for that index, even if that tag never appears on any page.)
#
# Maps a 【tag】 heading found on a page to the CSV column index it is written
# to; several synonymous tags share one column.  Annotation fixed: this is a
# dict, not a tuple.
tags_map: dict = {
    "pic": 0,  # column 0 holds the article image URL, not a page tag
    "中药名": 1,
    "药名": 1,
    "正名": 1,
    "别名": 2,
    "英文名": 3,
    "外语名": 3,
    "来源": 4,
    "药材来源": 4,
    "药用部位": 4,
    "药用来源": 4,
    "形态": 5,
    "植物形态": 5,
    "动物形态": 5,
    "矿物形态": 5,
    "菌体形态": 5,
    "昆虫形态": 5,
    "分布": 6,
    "产地分布": 6,
    "生境分布": 6,
    "加工": 7,
    "采收加工": 7,
    "加工炮制": 7,
    "制作加工": 7,
    "药材性状": 8,
    "功效与作用": 9,
    "归经": 10,
    "性味归经": 10,
    "性味功能": 10,
    "药材归经": 10,
    "临床应用": 11,
    "I临床应用": 11,  # intentionally kept: catches a known typo on the site
    "用法用量": 11,
    "化学成分": 12,
    "主要成分": 12,
    "药理研究": 13,
    "药理成分": 13,
    "药理": 13,
    "现代研究": 13,
    "使用禁忌": 14,
    "配伍药方": 15,
    "配合药方": 15,
    "药方": 15,
    "相关药方": 15,
}
# Highest column index currently present in tags_map; getFromUrl() increments
# it when it meets a tag that is not registered yet.
tags_map_list_num: int = 15

# Browser-like HTTP request headers for www.zhongyoo.com; passed to
# requests.get() so the site does not reject the crawler outright.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Cache-Control": "no-cache",
    "DNT": "1",
    "Host": "www.zhongyoo.com",
    "Pragma": "no-cache",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
}


def read_urls_from_file(file_path):
    """Load URLs (one per line) from *file_path* into the global ``urls`` list.

    Blank and whitespace-only lines are skipped — the previous version
    appended empty strings for them, which later failed as request URLs.

    :param file_path: path of the UTF-8 text file to read
    :return: the list of URLs that were appended (also for convenience)
    """
    with open(file_path, "r", encoding="utf-8") as file:
        # strip() removes the trailing newline and surrounding whitespace
        new_urls = [line.strip() for line in file if line.strip()]
    urls.extend(new_urls)
    return new_urls


# Populate the module-level `urls` list from urls.txt before crawling starts.
read_urls_from_file("urls.txt")


def getFromUrl(url, headers=headers):
    """Fetch one medicine page, parse its 【tag】content sections, and append
    one row to ./data.csv.

    Column positions come from the module-level ``tags_map``; tags not yet in
    the map are registered as brand-new columns (mutating ``tags_map`` and
    ``tags_map_list_num``).  All problems are recorded in the module-level
    ``errorLog`` instead of raising, so one bad page cannot stop the crawl.

    :param url: page URL to scrape
    :param headers: HTTP headers to send (defaults to the module-level set)
    """
    global tags_map_list_num  # declared once, at function scope

    # BUGFIX: the original called requests.get(url=url) with neither the
    # `headers` parameter (it was dead) nor a timeout, so the Timeout handler
    # below could never fire.
    try:
        response = requests.get(url=url, headers=headers, timeout=15)
    except requests.exceptions.Timeout:
        errorLog.append("[ERROR]超时:" + url)
        return

    try:
        # The site serves GBK-family content; gb18030 is its superset.
        response.encoding = "gb18030"
        response_code = response.text
    except UnicodeDecodeError:
        errorLog.append("[ERROR]无法解码:" + url)
        return

    soup = BeautifulSoup(response_code, "html.parser")

    # Article body is the last element with class "text"; article images live
    # under /uploads/allimg.
    text_elements = soup.find_all(class_="text")
    img_tags = soup.find_all(
        "img", attrs={"src": lambda x: x and "/uploads/allimg" in x}
    )
    img_srcs = [img["src"] for img in img_tags]

    if not text_elements:
        # BUGFIX: the original only logged and fell through, then crashed
        # with UnboundLocalError on `extracted_text`.  Skip the page instead.
        errorLog.append("[WARN]未抓取到text class:" + url)
        return
    extracted_text = text_elements[-1].get_text(strip=True)

    # Capture 【tag】 plus the text that follows, up to the next 【 or EOF.
    pattern = r"【(.*?)】(.*?)(?=【|$)"
    matches = re.findall(pattern, extracted_text, re.S)  # re.S: . matches newlines
    if not matches:
        # BUGFIX: guard added — the original indexed matches[0] unchecked.
        errorLog.append("[WARN]未抓取到text class:" + url)
        return

    print("正在爬取:" + matches[0][1] + "\n" + url)
    # newline="" is the csv-module recommended mode for csv file handles.
    with open("./data.csv", mode="a", encoding="utf-8", newline="") as file:
        writer = csv.writer(file)
        add = [""] * (len(tags_map))
        # BUGFIX: the original tested `len(img_srcs) < 0` (never true) and
        # then crashed on img_srcs[0] for pages without a matching image.
        if not img_srcs:
            errorLog.append("[WARN]未抓取到符合条件的img元素:" + url)
            return
        add[0] = img_srcs[0]

        # tags_map_which_add[pos] remembers which tag last wrote column pos,
        # so merge notes can name both tags involved.
        tags_map_which_add = [""] * (len(tags_map))
        for tag, content in matches:
            if tag not in tags_map:
                # Unknown tag: register it as a brand-new column.
                errorLog.append("[WARN]:未定义标签:" + tag + ":" + url)
                tags_map_list_num += 1
                tags_map[tag] = tags_map_list_num
            pos: int = tags_map[tag]
            # BUGFIX: the original extended only up to length `pos`
            # (off by one) and never grew tags_map_which_add alongside
            # `add` in the known-tag branch.
            if pos >= len(add):
                add.extend([""] * (pos - len(add) + 1))
            if pos >= len(tags_map_which_add):
                tags_map_which_add.extend([""] * (pos - len(tags_map_which_add) + 1))
            if add[pos] != "":
                errorLog.append(
                    f"[NOTE]已合并{tag}和{tags_map_which_add[pos]}:{url}"
                )
            add[pos] += content
            tags_map_which_add[pos] = tag

        if not add:
            errorLog.append("[NOTE]写入结果为空,已拦截该次写入:" + url)
        else:
            # Trim trailing empty columns before writing the row.
            # BUGFIX: the original del-loop raised IndexError once the list
            # was exhausted; `while add and ...` terminates safely.
            while add and add[-1] == "":
                add.pop()
            writer.writerow(add)


# Crawl every URL; pause 10 s between requests to stay polite to the site.
# No pause is needed after the final URL, so compare against it.
final_url = urls[-1] if urls else None
for url in urls:
    getFromUrl(url)
    if url != final_url:
        time.sleep(10)

# Print the collected run log, numbered from 1; report success if empty.
if errorLog:
    print("运行日志:")
    for index, entry in enumerate(errorLog, start=1):
        print(f"{index}:{entry}")
else:
    print("全部URL解析成功")

# Re-read the raw data file and write a copy with a header row derived from
# tags_map: the first tag registered for each column index is its title.
rows = []
with open(
    "data.csv", mode="r", encoding="utf-8"
) as file:  # or 'utf-8-sig' / 'gbk', depending on the file's actual encoding
    rows = list(csv.reader(file))

# Invert tags_map once (first-wins per column index) instead of re-scanning
# the whole map for every column.
column_titles = {}
for tag, col in tags_map.items():
    column_titles.setdefault(col, tag)
csv_header = [
    column_titles[i] for i in range(0, tags_map_list_num + 1) if i in column_titles
]

rows.insert(0, csv_header)
with open("data_with_header.csv", mode="w", newline="", encoding="utf-8") as file:
    csv.writer(file).writerows(rows)
