import json
import re
import urllib
from pathlib import Path

from bs4 import BeautifulSoup
from requests import Session
from requests.adapters import HTTPAdapter

import downloader
import idmapper
import translator
import utils

# Shared HTTP session for all wiki requests.
session = Session()
# Ignore proxy environment variables; connect directly.
session.trust_env = False
# BUG FIX: the original `session.adapters.DEFAULT_RETRIES = 10` only set an
# attribute on the adapters dict and never enabled retries; mounting adapters
# with `max_retries` is the supported way to retry failed connections.
_retry_adapter = HTTPAdapter(max_retries=10)
session.mount("http://", _retry_adapter)
session.mount("https://", _retry_adapter)
session.headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67"}

def _existing_files(directory: Path) -> set:
    """Return the names of all files currently present in *directory*."""
    return {entry.name for entry in directory.glob("*")}

# Where spawning-map background images are stored.
spawning_maps_path = Path("img/spawning-maps")
# Names of all spawning maps already downloaded.
spawning_maps = _existing_files(spawning_maps_path)

# Where the spawning-distribution SVG overlays are stored.
spawning_svgs_path = Path("img/spawning-svg")
# Names of all SVG overlays already downloaded.
spawning_svgs = _existing_files(spawning_svgs_path)

# Where creature icons are stored.
icon_path = Path("img/creatures-icon")
# Names of all icon files already downloaded.
icon_files = _existing_files(icon_path)

# Where dossier images are stored.
dossier_path = Path("img/dossiers")
# Names of all dossier images already downloaded.
dossier_files = _existing_files(dossier_path)

# Where creature sound clips are stored.
sound_path = Path("audio/creatures-sound")
# Names of all sound files already downloaded.
sound_files = _existing_files(sound_path)

# Where gallery photos are stored.
gallery_path = Path("img/gallery")
# Names of all gallery photos already downloaded.
gallery_files = _existing_files(gallery_path)

# Where stat (attribute) icons are stored.
stats_path = Path("img/stats")
# Names of all stat icons already downloaded.
stats_files = _existing_files(stats_path)

def get_name(soup:BeautifulSoup)->str:
    """Extract the creature's name from its info box.

    Raises:
        Exception: when the name container is missing from the page.
    """
    name_div = soup.find("div", class_="info-X2-75")
    if name_div is None:
        raise Exception("未找到名字")
    return name_div.get_text(strip=True)

def get_icon(soup:BeautifulSoup,update_icon:bool)->str:
    """Return the creature's icon file name, queueing a download when needed.

    Args:
        soup: parsed creature page.
        update_icon: when True, re-download the icon even if a local copy exists.

    Returns:
        The icon file name, or "Missing.png" when no usable icon is present.
    """
    div = soup.find("div", class_="info-X2-25")
    if div is None:
        # Guard: some pages lack the icon container entirely (the original
        # code raised AttributeError here).
        return "Missing.png"
    img = div.img
    if img is None:
        return "Missing.png"
    img_name = img.get("data-image-name")
    if img_name == "Blank.png":
        return "Missing.png"
    if img_name not in icon_files or update_icon:
        src = img.get("data-src")  # prefer the lazy-loaded image URL
        if src is None:
            src = img.get("src")
        # Drop the "/revision/..." suffix to get the original image URL.
        src = src.split("/revision/")[0]
        downloader.add_task(src, icon_path/img_name)
        icon_files.add(img_name)
    return img_name

def get_dossier(soup:BeautifulSoup,update_dossier:bool)->str:
    """Return the dossier image file name, queueing a download when needed.

    Returns "Missing.png" when the page has no usable dossier image.
    """
    for img in soup.select(".info-column img"):
        name = img.get("data-image-name")
        # Placeholder images carry no dossier.
        if name in ("Dossier Empty.png","Blank.png"):
            continue
        if update_dossier or name not in dossier_files:
            url = img.get("data-src")
            if url is None:
                url = img.get("src")
            # Drop the "/revision/..." suffix to get the original image URL.
            url = url.split("/revision/")[0]
            downloader.add_task(url, dossier_path/name)
            dossier_files.add(name)
        return name
    return "Missing.png"

def get_creature_sound(soup:BeautifulSoup,update_sound:bool)->"str | None":
    """Return the creature-call audio file name, or None when absent.

    Queues a download when the file is missing locally or update_sound is
    True. (The original annotation claimed `str`, but None was always a
    possible return value.)
    """
    info_framework = soup.find("div",class_="info-framework")
    audio = info_framework.find("audio")
    if audio is None:
        return None
    # Drop the "/revision/..." suffix to get the canonical file URL.
    src = audio.get("src").split("/revision/")[0]
    # The file name is the last path segment of the URL.
    audio_name = src.rsplit("/", 1)[-1]
    if audio_name not in sound_files or update_sound:
        downloader.add_task(src, sound_path/audio_name)
        sound_files.add(audio_name)
    return audio_name

def get_groups(soup:BeautifulSoup)->list:
    """Return the titles of the groups this creature belongs to."""
    label = soup.find("div", text="Group")
    if label is None:
        return []
    value_div = label.find_next_sibling()
    return [anchor.get("title") for anchor in value_div.select("a")]

def get_dlcs(soup:BeautifulSoup)->list:
    """Return the ids of the DLCs that feature this creature."""
    label = soup.find("a", text="DLCs")
    if label is None:
        return []
    value_div = label.parent.find_next_sibling()
    return [idmapper.get_id(anchor.get("title")) for anchor in value_div.select("a")]

def get_torpor_immune(soup:BeautifulSoup)->bool:
    """Return whether the creature is immune to torpor.

    Defaults to True when the page has no "Torpor Immune" entry.
    """
    label = soup.find("a", text="Torpor Immune")
    if label is None:
        return True
    # The sibling div next to the label's parent holds the Yes/No value.
    value = label.parent.find_next_sibling().get_text(strip=True)
    return value == "Yes"

def get_radiation_immune(soup:BeautifulSoup)->bool:
    """Return whether the creature is immune to radiation.

    Defaults to False when the page has no "Radiation Immune" entry.
    """
    label = soup.find("a", text="Radiation Immune")
    if label is None:
        return False
    value = label.parent.find_next_sibling().get_text(strip=True)
    return value == "Yes"

def get_no_saddle_needed(soup:BeautifulSoup)->bool:
    """Return whether the creature can be ridden without a saddle."""
    marker = soup.find(
        "a",
        title="Category:Creatures that can be ridden without saddles",
        text="No Saddle Needed",
    )
    return marker is not None

def get_feces(soup:BeautifulSoup)->list:
    """Return the ids of the feces items this creature drops.

    (Note: "feces" is the Latin plural; the singular is "faex".)
    """
    label = soup.find("a", text="Feces")
    if label is None:
        return []
    ignored = ("ARK: Survival Evolved Mobile","Category:Creatures that don't drop feces")
    result = []
    for anchor in label.parent.find_next_sibling().select("a"):
        title = anchor.get("title")
        if title in ignored:
            continue
        item_id = idmapper.get_id(title)
        # De-duplicate while preserving order.
        if item_id not in result:
            result.append(item_id)
    return result

def get_base_xp(soup:BeautifulSoup)->float:
    """Return the base XP for killing the creature (0 when not listed).

    Actual XP is base_xp * (1 + (level - 1) * 0.1).
    """
    xp_span = soup.find("span", id="creatureKillXP")
    return 0 if xp_span is None else float(xp_span.get("data-basexp"))

def get_drag_weight(soup:BeautifulSoup)->float:
    """Return the creature's drag weight, or -1 when not listed."""
    label = soup.find("a", text="Drag Weight")
    if label is None:
        return -1
    value = label.parent.find_next_sibling().get_text(strip=True)
    return float(value)

def get_equipments(soup:BeautifulSoup)->list:
    """Return the ids of items the creature can equip."""
    header = soup.find("div", text=" Equipment\n  ")
    if header is None:
        return []
    # Titles that are category/DLC links rather than equipable items.
    ignored = (
        "ARK: Survival Evolved Mobile",
        "Category:Creatures that can be ridden without saddles",
        "Scorched Earth",
        "Aberration",
        "Extinction",
        "Genesis: Part 1",
        "Genesis: Part 2",
    )
    found = set()
    for anchor in header.find_next_sibling().select("a"):
        title = anchor.get("title")
        if title not in ignored:
            found.add(idmapper.get_id(title))
    return list(found)

def get_skins(soup:BeautifulSoup)->list:
    """Return the ids of skins the creature can wear."""
    label = soup.find("a", text="Skins")
    if label is None:
        return []
    container = label.parent.find_next_sibling()
    unique_ids = {idmapper.get_id(anchor.get("title")) for anchor in container.select("a")}
    return list(unique_ids)

def get_taming_methods(soup:BeautifulSoup)->list:
    """Return the creature's taming methods (empty when untameable)."""
    label = soup.find("a", title="Taming", text="Taming")
    if label is None:
        return []
    text = label.parent.find_next_sibling().get_text(strip=True)
    # After whitespace stripping the methods are joined by a literal "or".
    return text.split("or")

def get_rider_weaponary(soup:BeautifulSoup)->bool:
    """Return whether weapons can be used while riding this creature."""
    label = soup.find("a", text="Rider Weaponry")
    if label is None:
        return False
    value = label.parent.find_next_sibling().get_text(strip=True)
    return value == "Yes"

def get_loots(soup:BeautifulSoup)->list:
    """Return the ids of loot items this creature drops."""
    header = soup.find("div", text=" Loot\n  ")
    if header is None:
        return []
    loots = []
    for li in header.find_next_sibling().select("ul>li"):
        a = li.a
        if a is None:
            # Guard: list items without an anchor crashed the original with
            # AttributeError; skip them, consistent with get_drops().
            continue
        loots.append(idmapper.get_id(a.get("title")))
    return loots

def get_drops(soup:BeautifulSoup)->list:
    """Return the ids of items in the page's "Drops" section.

    The item list appears under several different sibling layouts; each known
    layout is handled below, and an unknown layout raises.
    """
    heading = soup.find("span", id="Drops")
    if heading is None:
        return []
    sibling = heading.parent.find_next_sibling()
    name = sibling.name
    if name == "ul":
        ul = sibling
    elif name == "div":
        ul = sibling.find("ul", class_="itemlist")
    elif name in ("h4", "dl"):
        ul = sibling.find_next_sibling()
    elif name == "p":
        if sibling.get_text(strip=True) not in ["All Variants drop:"]:
            return []
        ul = sibling.find_next_sibling()
    elif name in ("h2", "h3"):
        return []
    else:
        raise Exception("未知网页的数据格式")
    if ul is None:
        return []
    drops = []
    for li in ul.select("li"):
        link = li.a
        if link is None:
            continue
        drops.append(idmapper.get_id(link.get("title")))
    return drops

def get_stats(soup:BeautifulSoup)->list:
    """Extract per-variant base stats, per-level gains, and movement speeds.

    Returns a list of dicts, one per variant: {"id", "name", "attributes",
    "speeds"}. Raises ValueError when the variant names and the stat/speed
    tables do not line up, or a speed table has an unknown column count.
    """
    stats = []
    span = soup.find("span",id="Base_Stats_and_Growth")
    if span is None:
        return []
    h3 = span.parent
    tag = h3.find_next_sibling()
    if tag.name in ("p","ul"):
        tag = tag.find_next_sibling()
    elif tag.name in ("h1","h2","h3","h4"):
        return []
    elif tag.name == "div" and tag.get("style") == "padding-bottom:5px;padding-left:25px":
        # The Unicorn's stats match the Equus, so its page does not list them.
        return []
    # Extract the names of all variants; a class-less tag means no tab bar.
    if tag.get("class") is None:
        names=["default"]
    else:
        names=[a.get_text(strip=True) for a in tag.select(".wds-tabs__wrapper a")]
    # Stat tables.
    stat_tables = soup.find_all("table",attrs={"data-description":"Base Stats and Growth"})
    if len(stat_tables)==0:
        stat_tables = tag.select("table")
    # Movement-speed tables.
    speed_tables = soup.find_all("table",attrs={"data-description":"Movement Speed"})
    if len(speed_tables)==0:
        speed_tables = tag.find_next_sibling().select("table")
    if len(names)!=len(stat_tables) or len(stat_tables)!= len(speed_tables):
        raise ValueError("种类名称的数量与状态数据的数量不对应")
    for i in range(len(names)):
        stat = {}
        stat["id"] = f"stats{i}"
        stat["name"] = names[i]
        # Collect the attribute values.
        attributes = {}
        for tr in stat_tables[i].select("tr"):
            attribute = {}
            tds = tr.select("th[style='text-align: left'],td")
            if len(tds)==0:
                continue
            # Attribute name.
            name = tds[0].get_text(strip=True)
            attribute["name"] = name
            # Attribute icon; queue a download when missing locally.
            img = tds[0].img
            img_name = img.get("data-image-name")
            attribute["icon"] = img_name
            if img_name not in stats_files:
                src = img.get("data-src")
                if src is None:
                    src = img.get("src")
                src = src.split("/revision/",maxsplit=1)[0]
                downloader.add_task(src, stats_path/img_name)
                stats_files.add(img_name)
            # The camel-cased attribute name doubles as its id and dict key.
            key=utils.to_camel(name)
            attribute["id"] = key
            attributes[key]=attribute

            # Base value of the attribute.
            value = tds[1].get_text(strip=True)
            if value == "N/A":
                attribute["base"] = 0
            elif value == "?":
                attribute["base"] = 0
            else:
                value = value.split("/")[0]
                value = value.replace("%","")
                attribute["base"] = float(value)
            # Per-level gain in the wild.
            value = tds[2].get_text(strip=True)
            if value == "N/A":
                attribute["wildIncrease"] = 0
            elif value == "?":
                attribute["wildIncrease"] = 0
            else:
                value = value.split("/")[0]
                value = value.replace("%","")
                attribute["wildIncrease"] = float(value)
            # For melee damage, movement speed and charge emission range, the
            # value and gain are converted to percentages (100% stored as 100,
            # not as 1.0).
            if key in ["meleeDamage","movementSpeed","chargeEmissionRange"]:
                if attribute["base"]==0:
                    attribute["wildIncrease"] = 0
                else:
                    attribute["wildIncrease"] = attribute["wildIncrease"]/attribute["base"]*100
                attribute["base"]=100
            # Untameable creatures have no further columns; skip the tamed data.
            if len(tds)<4:
                continue
            # Per-level gain after taming.
            value = tds[3].get_text(strip=True)
            if value == "N/A":
                attribute["tamedIncrease"] = 0
            elif value == "?":
                attribute["tamedIncrease"] = 0
            else:
                value = value.replace("+","")
                value = value.replace("%","")
                attribute["tamedIncrease"] = float(value)
            # Taming bonus "Ta" (additive).
            attribute["tamedAddition"] = tds[4].get_text(strip=True)
            # Taming bonus "Tm" (multiplicative).
            attribute["tamedMultiplication"] = tds[5].get_text(strip=True)
        stat["attributes"] = attributes
        # Collect the movement speeds.
        speeds = {}
        for tr in speed_tables[i].select("tr"):
            tds = tr.select("th[style='text-align:left'],td")
            if len(tds)==0:
                continue
            speed = {}
            speed["name"]= tds[0].get_text(strip=True)
            key = utils.to_camel(speed["name"])
            speed["id"] = key
            speeds[key] = speed
            speed["wildBase"] = tds[1].get_text(strip=True) # base speed in the wild
            if len(tds)==8:
                speed["tamedBase"] = tds[2].get_text(strip=True) # base speed when tamed
                speed["wildSprinting"] = tds[4].get_text(strip=True) # sprint speed in the wild
                speed["tamedSprinting"] = tds[5].get_text(strip=True) # sprint speed when tamed
                speed["staminaCost"] = tds[7].get_text(strip=True)  # stamina cost while sprinting
            elif len(tds) == 3:
                speed["wildSprinting"] = tds[2].get_text(strip=True) # sprint speed in the wild
            elif len(tds) == 12:
                speed["tamedBase"] = tds[2].get_text(strip=True)
                speed["riddenBase"] = tds[4].get_text(strip=True) # base speed while ridden
                speed["wildSprinting"] = tds[6].get_text(strip=True)
                speed["tamedSprinting"] = tds[7].get_text(strip=True)
                speed["riddenSprinting"] = tds[9].get_text(strip=True)  # sprint speed while ridden
                speed["staminaCost"] = tds[11].get_text(strip=True)
            else:
                raise ValueError(f"未知的表格格式,列数{len(tds)}")
            # Convert the textual values to floats (0 when unparsable).
            for key,value in speed.items():
                if key in ("name","id"):
                    continue
                try:
                    speed[key] = float(value)
                except:
                    speed[key] = 0
        stat["speeds"] = speeds
        stats.append(stat)
    return stats

def get_attacks(soup:BeautifulSoup)->list:
    """Extract the creature's attacks (name, key bindings, stamina cost,
    descriptions, damage).

    Handles both the new ".dino-attack-info" layout and the legacy
    table-per-attack layout.
    """
    attacks = []
    # New layout; currently used by the Wyverns and the recently buffed Megalosaurus.
    rows = soup.select(".dino-attack-info tr")
    if len(rows)>0:
        for row in rows:
            attack={}
            attack["name"] = row.th.big.text
            # Key bindings: a plain span is the PC key...
            keys = []
            span=row.th.span
            if span is not None:
                key = {
                    "name":span.get_text(strip=True),
                    "platform":"PC"
                }
                keys.append(key)
            # ...and anchors with platform images hold console bindings.
            for a in row.select("th>a"):
                key={}
                if a.img is not None:
                    title = a.get("title")
                    key["name"] = title
                    key["platform"] = a.img.get("data-image-name").split(" ",maxsplit=-1)[0]
                keys.append(key)
            attack["keys"] = keys
            # Stamina cost.
            a = row.th.find("a",title = "Stamina")
            attack["staminaCost"] = int(a.get_text(strip=True))
            # Attack descriptions.
            attack["descriptions"] = [li.text.strip() for li in row.td.select("li")]
            # Attack damage.
            b = row.td.find("b",text=re.compile(r".*\d*\.?\d* damage.*"))
            if b is None:
                attack["damage"]=0
            else:
                # BUG FIX: the original re-assigned `match` from a leftover
                # debug call with the hard-coded string "26 immediate damage",
                # so every attack in this layout reported 26 damage.
                match=re.search(r"(\d+\.?\d*).*damage", b.get_text())
                attack["damage"] = float(match.group(1))
            attacks.append(attack)
        return attacks
    # Legacy layout: a set of fairly complex tables.
    h3 = soup.find("span",id="Base_Stats_and_Growth")
    if h3 is not None:
        div = h3.parent.find_next_sibling()
        # When variant attack data is present, narrow the search to the first
        # tab so only the base creature's attacks are extracted.
        if div.get("class") is not None and div.get("class")[0] == "tabber":
            soup=div.find("div",class_="wds-tab__content")
    tables = soup.find_all("table",attrs={"data-description":"Attacks"})
    if len(tables)>0:
        for table in tables:
            attack = {}
            rows = table.select("tr")
            # Row 1 holds the attack name.
            attack["name"]=rows[0].th.text
            # Row 2 holds the description...
            attack["descriptions"] = [rows[1].td.text]
            # ...and the key bindings (anchors/kbd tags followed by "(platform)" text).
            keys=[]
            key_tag = rows[1].td.select("a[class='image'],kbd")
            for tag in key_tag:
                key={}
                if tag.name=="a":
                    key["name"] = tag.get("title")
                else:
                    key["name"] = tag.get_text()
                string = tag.next_sibling.string
                match = re.match(r".*\((.*)\).*", string)
                key["platform"]= match.group(1)
                keys.append(key)
            attack["keys"] = keys
            # Row 3 holds the stamina cost.
            tds = rows[2].select("td")
            attack["staminaCost"]=tds[0].text
            # When a sixth row exists, its first td holds the damage.
            if len(rows)>=6:
                if rows[5].td is None:
                    attack["damage"] = 0
                elif rows[5].td.get_text(strip=True) == "":
                    attack["damage"] = 0
                else:
                    attack["damage"] = float(rows[5].td.get_text(strip=True))
            attacks.append(attack)
        return attacks
    return attacks

def get_trapped_by(soup:BeautifulSoup)->list:
    """Return the ids of traps that can immobilize this creature."""
    label = soup.find("a", text="Immobilized By")
    if label is None:
        return []
    container = label.parent.find_next_sibling("div")
    # Anchors wrapping images are icons; only image-less anchors carry titles.
    return [
        idmapper.get_id(anchor.get("title"))
        for anchor in container.select("a")
        if anchor.img is None
    ]

def get_can_damage(soup:BeautifulSoup)->list:
    """Return the ids of structure materials this creature can damage."""
    label = soup.find("a", text="Can Damage")
    if label is None:
        return []
    container = label.parent.find_next_sibling("div")
    # Anchors wrapping images are icons; only image-less anchors carry titles.
    return [
        idmapper.get_id(anchor.get("title"))
        for anchor in container.find_all("a")
        if anchor.img is None
    ]

def get_carryable_by(soup:BeautifulSoup,creature_id:str)->list:
    """Look up, on the carryable-creatures list page, which creatures can carry this one."""
    row_anchor = soup.find("a", href=f"/wiki/{creature_id}")
    if row_anchor is None:
        return []
    carriers = []
    # Cells marked class="yes" belong to creatures that can carry this one.
    for td in row_anchor.parent.find_next_siblings("td", class_="yes"):
        if td.a is None:
            continue
        href = td.a.get("href")
        # The carrier's id is the last path segment of its wiki link.
        carriers.append(href.rsplit("/", 1)[-1])
    return carriers

def get_weight_reductions(soup:BeautifulSoup)->list:
    """Return the per-item carry-weight reductions this creature provides."""
    heading = soup.find("span", id="Weight_Reduction")
    if heading is None:
        return []
    table = heading.parent.find_next_sibling("table")
    reductions = []
    # Skip the header row.
    for row in table.select("tr")[1:]:
        cells = row.select("td")
        reductions.append({
            "id": idmapper.get_id(cells[0].a.get("title")),
            "value": cells[1].get_text(strip=True),
        })
    return reductions

def get_spawn_commands(soup:BeautifulSoup)->list:
    """Return the spawn commands for every variant of the creature.

    Each entry is a dict with "name" (variant name, "" for the default) plus
    "blueprintId" and/or "entityId" extracted from the copyable commands.
    (The original annotation said `dict`, but a list was always returned.)
    """
    info_framework=soup.find("div",class_="info-framework")
    spans=info_framework.select(".mw-collapsible-content>span")
    if len(spans)==0:
        return []
    spawn_commands=[]
    i=0
    while i<len(spans):
        command_id={}
        # A class-less span names the variant; otherwise this command block
        # belongs to the default (unnamed) variant.
        if spans[i].get("class") is None:
            command_id["name"] = spans[i].get_text(strip=True).replace("Variant ","")
            i+=1
        else:
            command_id["name"] = ""
        # Consume the classed spans that follow; only "copy-clipboard" ones
        # carry commands.
        while i<len(spans) and spans[i].get("class") is not None:
            if spans[i].get("class")[0] == "copy-clipboard":
                command = spans[i].get_text()
                if "Blueprint" in command:
                    # Slice off the fixed command text around the blueprint path.
                    command_id["blueprintId"] = command[16:-11]
                else:
                    # Slice off the fixed command prefix before the entity id.
                    command_id["entityId"] = command[13:]
            # BUG FIX: the index must advance for every classed span; the
            # original only incremented inside the if-branch, so any classed
            # span that was not "copy-clipboard" caused an infinite loop.
            i+=1
        spawn_commands.append(command_id)
    return spawn_commands

def get_eggs(soup:BeautifulSoup)->list:
    """Return the ids of the eggs this creature lays (empty when not egg-laying)."""
    label = soup.find("a", title="Breeding", text="Reproduction")
    if label is None:
        return []
    row = label.parent.parent.find_next_sibling()
    # Only egg-laying creatures have an "Egg" reproduction row.
    if row.div.get_text(strip=True) != "Egg":
        return []
    eggs = []
    value_div = row.find("div", class_="info-arkitex-right")
    for anchor in value_div.select("a"):
        item_id = idmapper.get_id(anchor.get("title"))
        if "Egg" in item_id and item_id not in eggs:
            eggs.append(item_id)
    return eggs

def get_breed(soup:BeautifulSoup)->dict:
    """Collect the creature's breeding data (incubation, maturation, intervals)."""
    label = soup.find("a", text="Reproduction")
    if label is None:
        return {}
    # Rows whose value is a single duration string, mapped to output keys.
    duration_fields = {
        "Incubation Time": "incubationTime",          # egg-layers
        "Gestation Time": "gestationTime",            # mammals / live-bearers
        "Baby Time": "babyTime",
        "Juvenile Time": "juvenileTime",
        "Adolescent Time": "adolescentTime",
        "Total Maturation Time": "totalTime",
    }
    breed = {}
    # Each div following the header div holds one key/value row.
    for row in label.parent.parent.find_next_siblings():
        key = row.div.text.strip()
        text = row.find("div", class_="info-arkitex-right").text
        if key == "Incubation Range":
            # Keep only the Celsius part, then split it into min/max.
            celsius_range = text.split(" °C / ", maxsplit=1)[0]
            low, high = celsius_range.split(" - ")
            breed["minTemperature"] = float(low)
            breed["maxTemperature"] = float(high)
        elif key == "Breeding Interval":
            # The bounds are joined by a non-breaking-space-padded dash.
            low, high = text.split("\xa0-\xa0")
            breed["minInterval"] = utils.time_to_seconds(low)
            breed["maxInterval"] = utils.time_to_seconds(high)
        elif key in duration_fields:
            breed[duration_fields[key]] = utils.time_to_seconds(text)
    return breed

def get_habitats(soup:BeautifulSoup)->list:
    """Extract the spawning-map data for every variant of the creature.

    Returns one entry per variant: {"id", "species", "isEventCreature",
    "spawningData"}; each spawningData item holds the map name, notice text,
    background map file and distribution SVG (both queued for download when
    missing locally). Raises ValueError when names and data blocks do not
    line up.
    """
    spawn_div = soup.select_one(".info-spawn>div")
    if spawn_div is None:
        return []
    habitats = []
    # Check whether variant tabs are present.
    has_variant = False
    for content in spawn_div.find_all("div",class_="wds-tab__content",recursive=False):
        if content.ul is not None:
            has_variant = True
            break
    # With variants: collect every variant name and its data container div.
    if has_variant:
        species_list = [a.get_text(strip=True) for a in spawn_div.div.select("a")]
        divs = spawn_div.find_all("div",class_="wds-tab__content",recursive=False)
    else:
        species_list = ["default"]
        divs = [spawn_div]
    # Sanity check.
    if len(species_list)!=len(divs):
        raise ValueError("种类的数量与分布数据的数量不一致")
    for i in range(len(species_list)):
        habitat = {"id":f"species{i}"}
        # Variant name.
        habitat["species"] = species_list[i]
        # Whether this variant only spawns during events.
        habitat["isEventCreature"] = divs[i].find("div",class_="info-spawningmap-event") is not None
        # Names of the individual maps.
        maps =[a.get_text(strip=True) for a in divs[i].select(".wds-tabs__wrapper a")]
        map_containers = divs[i].select(".wds-tab__content")
        if len(maps)!=len(map_containers):
            raise ValueError("地图的数量与分布数据的数量不一致")
        habitat["spawningData"] = []
        for j in range(len(maps)):
            spawning_data = {"id":f"map{i}{j}"}
            # Map name.
            spawning_data["mapName"] = maps[j]
            # Notice text.
            spawning_data["notice"] = map_containers[j].p.get_text().strip()
            # Background map image; queue a download when missing locally.
            map_img = map_containers[j].select_one(".spawningMap-map img")
            if map_img is not None:
                img_name = map_img.get("data-image-name")
                spawning_data["mapFile"] = img_name
                if img_name not in spawning_maps:
                    src = map_img.get("data-src")
                    if src is None:
                        src = map_img.get("src")
                    src = src.split("/revision/",maxsplit=1)[0]
                    downloader.add_task(src, spawning_maps_path/img_name)
                    spawning_maps.add(img_name)
            # Distribution data: an SVG overlay; same download handling.
            spawning_img = map_containers[j].select_one(".svgCreatureMap img")
            if spawning_img is not None:
                img_name = spawning_img.get("data-image-name")
                spawning_data["spawningSvg"] = img_name
                if img_name not in spawning_svgs:
                    src = spawning_img.get("data-src")
                    if src is None:
                        src = spawning_img.get("src")
                    src = src.split("/revision/",maxsplit=1)[0]
                    downloader.add_task(src, spawning_svgs_path/img_name)
                    spawning_svgs.add(img_name)
            habitat["spawningData"].append(spawning_data)
        habitats.append(habitat)
    return habitats

def get_gallery(soup:BeautifulSoup)->list:
    """Collect the gallery images and captions, queueing missing downloads."""
    photos = []
    for box in soup.select(".gallery>.gallerybox"):
        img = box.img
        img_name = img.get("data-image-name")
        # Queue a download when the image is not present locally.
        if img_name not in gallery_files:
            url = img.get("data-src")
            if url is None:
                url = img.get("src")
            # Drop the "/revision/..." suffix to get the original image URL.
            url = url.split("/revision/", maxsplit=1)[0]
            downloader.add_task(url, gallery_path / img_name)
            gallery_files.add(img_name)
        # Caption text shown below the image.
        caption = box.find("div", class_="gallerytext").text.strip()
        photos.append({"file": img_name, "caption": caption})
    return photos

def get_changelogs(soup:BeautifulSoup)->list:
    """Return the creature's changelog as a list of {patch, changes} entries."""
    heading = soup.find("span", id="Changelog")
    if heading is None:
        return []
    logs = []
    table = heading.parent.find_next_sibling()
    for row in table.select("tr"):
        cells = row.select("td")
        # Header rows contain no td cells.
        if not cells:
            continue
        changes = cells[1].text.replace("\xa0", "").strip()
        logs.append({
            "patch": cells[0].get_text().strip(),
            "changes": changes.split("\n"),
        })
    return logs

def get_dossier_description(soup:BeautifulSoup)->"dict | None":
    """Extract the textual dossier description.

    Returns None when the page has no dossier section. (The original
    annotation claimed `dict`, but None was always a possible return value.)
    """
    if soup.find("span",id="Dossier") is None:
        return None
    background = soup.find("div",class_="dossier-background")
    if background is None:
        return None

    def _from_bold(label):
        # <b>label</b> inside the dossier div, followed by a <p> with the value.
        b = background.find("b", text=label)
        if b is None:
            return None
        return b.find_next_sibling("p").get_text(strip=True)

    def _from_term(label):
        # <dt>label</dt> anywhere on the page; its parent's next <p> holds the value.
        dt = soup.find("dt", text=label)
        if dt is None:
            return None
        return dt.parent.find_next_sibling("p").get_text(strip=True)

    description = {}
    # Formal name / Latin species name.
    species = _from_bold("Species")
    if species is not None:
        description["formalName"] = species
        description["species"] = species
    # Era the creature lived in.
    time = _from_bold("Time")
    if time is not None:
        description["time"] = time
    # Wild / known-information / domesticated descriptions.
    for label, key in (
        ("Wild", "wild"),
        ("Known Information", "knownInformation"),
        ("Domesticated", "domesticated"),
    ):
        value = _from_term(label)
        if value is not None:
            description[key] = value
    return description

def get_trivia(soup:BeautifulSoup)->list:
    """Return the creature's trivia entries.

    (trivia: Latin plural; the singular form is trivium.)
    """
    heading = soup.find("span", id="Notes/Trivia")
    if heading is None:
        heading = soup.find("span", id="Notes")
    if heading is None:
        return []
    trivia_list = heading.parent.find_next_sibling("ul")
    if trivia_list is None:
        return []
    entries = []
    for li in trivia_list.find_all("li", recursive=False):
        lines = li.text.split("\n")
        # The first line is the trivium itself; subsequent lines are nested notes.
        entries.append({"content": lines[0], "notes": lines[1:]})
    return entries

def scrap_all_creatures(translate=True,update_data=False,update_icon=False,update_dossier=False,update_sound=False):
    """Scrape every creature page on the wiki and save data/creatures.json.

    Args:
        translate: translate newly scraped entries afterwards.
        update_data: re-scrape creatures that already have saved data.
        update_icon / update_dossier / update_sound: force re-download of the
            corresponding media files.
    """
    # Creatures to skip entirely.
    black_list = {}
    # Previously scraped creature data.
    creatures = utils.read_data("data/creatures.json")
    scraped = set()
    # Fetch and parse the creature list page.
    print("正在请求列表页")
    resp=session.get("https://ark.fandom.com/wiki/Creatures")
    creatures_page = BeautifulSoup(resp.content,"lxml")
    trs = creatures_page.select(".cargo-creature-table>tbody>tr")
    rows = trs[1:]
    # Fetch the variant-creature list page.
    print("正在请求变种生物列表")
    resp=session.get("https://ark.fandom.com/wiki/Creatures/Variants")
    variants_page = BeautifulSoup(resp.content,"lxml")
    trs=variants_page.select(".cargo-creature-table>tbody>tr")
    rows.extend(trs[1:])
    # Fetch and parse the "carryable creatures" page.
    print("正在请求抓取列表页")
    resp = session.get("https://ark.fandom.com/wiki/List_of_carryable_creatures")
    carryable_page = BeautifulSoup(resp.content,"lxml")
    # Entries that still need translating.
    need_translate = {}
    # Walk every table row.
    for i in range(0,len(rows)):
        tds = rows[i].select("td")
        # The last anchor's title identifies the creature.
        title = tds[0].select("a")[-1].get("title")
        creature_id=idmapper.get_id(title)
        # Skip black-listed creatures.
        if creature_id in black_list:
            print(f"【已忽略】第{i}行黑名单中的生物:{creature_id}")
            continue
        # Skip existing data unless an update was requested.
        if creature_id in creatures and not update_data:
            print(f"【已忽略】第{i}行的旧数据:{creature_id}")
            continue
        # Build the wiki page id (drop any "#fragment" part).
        wiki_id = idmapper.get_wiki_id(title)
        wiki_id = wiki_id.split("#",maxsplit=1)[0]
        # Skip pages already scraped during this run.
        if wiki_id in scraped:
            print(f"【已爬取】第{i}行的数据已跳过:https://ark.fandom.com/wiki/{wiki_id}")
            continue
        else:
            scraped.add(wiki_id)
        # Fetch the creature's own page.
        url=f"https://ark.fandom.com/wiki/{wiki_id}"
        print(f"正在爬取第{i}行的数据:{url}")
        resp = session.get(url)
        # Update existing data in place; otherwise start a new record.
        if creature_id in creatures:
            creature=creatures[creature_id]
        else:
            creature={"id":creature_id,"wikiId":wiki_id}
        creature_page = BeautifulSoup(resp.content,"lxml")  # parse the page
        # Name and aliases.
        creature["name"] = get_name(creature_page)
        creature["nameEn"] = creature["name"]
        # BUG FIX: the key was misspelled "aliased", which reset every
        # creature's aliases to "" on each scrape.
        creature["aliases"] = creature.get("aliases","")
        # Diet.
        creature["diet"]=tds[2].get_text(strip=True)
        # Temperament.
        creature["temperament"]=tds[3].get_text(strip=True)
        # Tameable?
        creature["tameable"]=tds[4].get_text(strip=True) == "Yes"
        # Rideable?
        creature["rideable"]=tds[5].get_text(strip=True) == "Yes"
        # Breedable?
        creature["breedable"]=tds[6].get_text(strip=True) == "Yes"
        # Saddle unlock level.
        creature["saddleLevel"]=tds[7].get_text(strip=True)
        # Groups the creature belongs to.
        creature["groups"] = get_groups(creature_page)
        # Icon.
        creature["icon"] = get_icon(creature_page,update_icon=update_icon)
        # Dossier image.
        creature["dossier"] = get_dossier(creature_page,update_dossier=update_dossier)
        # Trivia.
        creature["trivia"] = get_trivia(creature_page)
        # Dossier text description.
        creature["dossierDescription"] = get_dossier_description(creature_page)
        # Creature call audio.
        creature["sound"] = get_creature_sound(creature_page, update_sound=update_sound)
        # Torpor immunity.
        creature["torporImmune"]=get_torpor_immune(creature_page)
        # Radiation immunity.
        creature["radiationImmune"]=get_radiation_immune(creature_page)
        # Can be ridden without a saddle?
        creature["noSaddleNeeded"] = get_no_saddle_needed(creature_page)
        # Supports rider weaponry?
        creature["riderWeaponary"]=get_rider_weaponary(creature_page)
        # Feces type.
        creature["feces"] = get_feces(creature_page)
        # Drag weight.
        creature["dragWeight"] = get_drag_weight(creature_page)
        # Base XP.
        creature["baseXP"] = get_base_xp(creature_page)
        # Taming methods (only meaningful when tameable).
        if creature["tameable"]:
            creature["tamingMethod"] = get_taming_methods(creature_page)
        # Breeding data (only meaningful when breedable).
        if creature["breedable"]:
            creature["breed"]=get_breed(creature_page)
        # Equipable items.
        creature["equipments"] = get_equipments(creature_page)
        # Wearable skins.
        creature["skins"] = get_skins(creature_page)
        # Dropped items.
        creature["drops"] = get_drops(creature_page)
        # Carry-weight reductions.
        creature["weightReductions"] = get_weight_reductions(creature_page)
        # Loot drops.
        creature["loots"] = get_loots(creature_page)
        # Traps that immobilize it.
        creature["trappedBy"] = get_trapped_by(creature_page)
        # Structures it can damage.
        creature["canDamage"] = get_can_damage(creature_page)
        # Creatures that can carry it (from the carryable list page).
        creature["carryableBy"] = get_carryable_by(carryable_page,creature["id"])
        # Stats.
        creature["stats"] = get_stats(creature_page)
        # Attacks.
        creature["attacks"] = get_attacks(creature_page)
        # Spawn commands.
        creature["spawnCommands"]=get_spawn_commands(creature_page)
        # Habitat / spawning data.
        creature["habitats"] = get_habitats(creature_page)
        # Gallery images.
        creature["gallery"] = get_gallery(creature_page)
        # Changelog.
        creature["changelogs"] = get_changelogs(creature_page)
        # Store the finished record.
        creatures[creature_id]=creature
        # Remember it for translation.
        need_translate[creature_id]=creature
    # Translate the new entries.
    if translate:
        translator.translate_creatures(need_translate,update_translations=True)
    # Persist everything.
    utils.save_data(creatures, "data/creatures.json")

def update_translations():
    """Re-run translation over the saved creature data and persist the result."""
    data_file = "data/creatures.json"
    creatures = utils.read_data(data_file)
    translator.translate_creatures(creatures, update_translations=False)
    utils.save_data(creatures, data_file)

def add_custom_data():
    """Merge hand-maintained entries from custom-data/creatures.json into the dataset."""
    # Load the scraped data and the custom overrides.
    creatures = utils.read_data("data/creatures.json")
    custom_data = utils.read_data("custom-data/creatures.json")
    for creature_id, data in custom_data.items():
        creature = creatures[creature_id]
        # Aliases: override the scraped value entirely.
        if "aliases" in data:
            creature["aliases"] = data["aliases"]
        # Gallery: custom photos first, then any scraped photos not already present.
        if "gallery" in data:
            merged = data["gallery"]
            for photo in creature["gallery"]:
                if photo not in merged:
                    merged.append(photo)
            creature["gallery"] = merged
        # Introduction video link.
        if "introduceVideo" in data:
            creature["introduceVideo"] = data["introduceVideo"]
        # Science-popularization video link.
        if "sciencePopularizingVideo" in data:
            creature["sciencePopularizingVideo"] = data["sciencePopularizingVideo"]
    # Persist the merged data.
    utils.save_data(creatures, "data/creatures.json")