from requests import Session
from bs4 import BeautifulSoup
from bs4.element import Tag,NavigableString
import downloader
from pathlib import Path
import urllib
import json
import re
import translator
import utils
import idmapper
import filemapper
import wiki_decoder

# Shared HTTP session used by every wiki request in this module.
session = Session()
# Do not pick up proxy settings from environment variables.
session.trust_env = False
# NOTE(review): requests retries are normally configured per-adapter via
# HTTPAdapter(max_retries=...); assigning this attribute may have no effect — confirm.
session.adapters.DEFAULT_RETRIES=10
# Pretend to be a desktop browser so the wiki serves the full desktop markup.
session.headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67"}

# Aliases mapping alternate creature ids onto canonical database ids.
creature_aliases = utils.read_data("data/scraper/creature-aliases.json")
# The wiki's creature database (keyed by normalized creature id).
creature_database = utils.read_data("data/scraper/creature-database.json")

# Directory holding the habitat (spawn-location) map images.
spawning_maps_path = Path("img/spawning-maps")
# Names of all spawning map files already downloaded.
spawning_maps={file.name for file in spawning_maps_path.glob("*")}

# Directory holding the spawn-distribution SVG overlays.
spawning_svgs_path = Path("img/spawning-svg")
# Names of all SVG overlays already downloaded.
spawning_svgs = {file.name for file in spawning_svgs_path.glob("*")}

# Directory holding the creature icons.
icon_path = Path("img/creatures-icon")
# Names of the icon files currently on disk.
icon_files = {file.name for file in icon_path.glob("*")}

# Directory holding the dossier images.
dossier_path = Path("img/dossiers")
# Names of the dossier files currently on disk.
dossier_files = {file.name for file in dossier_path.glob("*")}

# Directory holding the creature call sounds.
sound_path = Path("audio/creatures-sound")
# Names of the sound files currently on disk.
sound_files = {file.name for file in sound_path.glob("*")}

# Directory holding the creature gallery images.
gallery_path = Path("img/gallery")
# Names of the gallery images currently on disk.
gallery_files = {file.name for file in gallery_path.glob("*")}

# Directory holding the stat/attribute icons.
stats_path = Path("img/stats")
# Names of the stat icon files currently on disk.
stats_files = {file.name for file in stats_path.glob("*")}

def get_titles():
    """Walk the paginated "Category:Creatures" listing and return every
    creature page title, following "next page" links until exhausted."""
    titles = []
    url = "https://ark.fandom.com/wiki/Category:Creatures"
    while url is not None:
        print("正在请求:",url)
        page = BeautifulSoup(session.get(url).content,"lxml")
        heading = page.find("h2",text='Pages in category "Creatures"')
        listing = heading.find_next_sibling("div",class_="mw-content-ltr")
        for anchor in listing.select("a"):
            titles.append(anchor.get("title"))
        next_link = heading.find_next_sibling("a",text="next page")
        url = f"https://ark.fandom.com{next_link.get('href')}" if next_link is not None else None
    return titles

def get_database_id(wiki_id):
    """Convert a wiki page id into the id used by the creature database.

    Strips hyphens/underscores/commas, lowercases, resolves aliases, and
    returns "blank" when the creature has no database entry.
    """
    # FIX: raw string — "\-" in a normal string literal is an invalid escape
    # sequence (SyntaxWarning from Python 3.12 onward)
    database_id = re.sub(r"[-_,]", "", wiki_id)
    database_id = database_id.lower()
    if database_id in creature_aliases:
        database_id = creature_aliases[database_id]
    if database_id in creature_database:
        return database_id
    return "blank"

def get_icon(soup:BeautifulSoup,update_icon:bool)->str:
    """Return the creature's icon filename, queueing a download when the file
    is absent locally or a refresh is forced; "Missing.png" when unusable."""
    icon_img = soup.find("div",class_="info-X2-25").img
    if icon_img is None:
        return "Missing.png"
    icon_name = icon_img.get("resource").replace("./File:","")
    if icon_name == "Blank.png":
        return "Missing.png"
    if update_icon or icon_name not in icon_files:
        # prefer the lazy-loaded URL, fall back to the plain src
        url = icon_img.get("data-src")
        if url is None:
            url = icon_img.get("src")
        # drop the "/revision/..." suffix to get the original image URL
        url = url.split("/revision/")[0]
        downloader.add_task(url, icon_path/icon_name)
        icon_files.add(icon_name)
    return icon_name

def get_name(soup:BeautifulSoup)->str:
    """Return the creature's display name from the infobox title bar.

    Raises when the expected title div is missing.
    """
    name_div = soup.find("div",class_="info-X2-75")
    if name_div is None:
        raise Exception("未找到名字")
    return name_div.get_text(strip=True)

def get_dossiers(soup:BeautifulSoup,update_dossier)->list:
    """Collect the dossier image filenames; queue downloads for files that are
    missing locally or when a refresh is forced."""
    dossiers = []
    for img in soup.select(".info-column>.nomobile img"):
        file_name = img.get("resource").replace("./File:","")
        # placeholder images are not real dossiers
        if file_name in ("Dossier_Empty.png","Blank.png"):
            continue
        dossiers.append(file_name)
        if update_dossier or file_name not in dossier_files:
            url = img.get("data-src")
            if url is None:
                url = img.get("src")
            # strip the "/revision/..." suffix to get the original image
            downloader.add_task(url.split("/revision/")[0], dossier_path/file_name)
            dossier_files.add(file_name)
    return dossiers

def get_sound(soup:BeautifulSoup,update_sound)->str:
    """Return the creature's call audio filename (queueing a download when
    needed), or None when the page embeds no audio."""
    audio = soup.find("div",class_="info-framework").find("audio")
    if audio is None:
        return None
    url = audio.source.get("src").split("/revision/")[0]
    # the filename is the last path segment of the URL
    audio_name = url.rsplit("/",maxsplit=1)[-1]
    if update_sound or audio_name not in sound_files:
        downloader.add_task(url, sound_path/audio_name)
        sound_files.add(audio_name)
    return audio_name

def get_tameable(params:dict)->bool:
    """Return True when the infobox params mark the creature as tameable."""
    if "tameable" not in params:
        return False
    return params["tameable"]["wt"] == "Yes"

def get_rideable(params:dict)->bool:
    """Return True when the infobox params mark the creature as rideable."""
    if "rideable" not in params:
        return False
    return params["rideable"]["wt"] == "Yes"

def get_no_saddle_needed(soup:BeautifulSoup)->bool:
    """Whether the page links to the "ridden without saddles" category."""
    anchor = soup.find("a",title="Category:Creatures that can be ridden without saddles",text="No Saddle Needed")
    if anchor is None:
        return False
    return True

def get_rider_weaponary(database_id:str)->bool:
    """Whether players can use weapons while riding this creature."""
    entry = creature_database[database_id]
    if "taming" not in entry:
        return False
    return entry["taming"].get("ballowmountedweaponry") == "Yes"

def get_radiation_immune(database_id)->bool:
    """True when the database marks this creature as immune to radiation."""
    entry = creature_database[database_id]
    return entry.get("radiationimmune") == "Yes"

def get_breedable(database_id:dict)->bool:
    """A creature counts as breedable when its database entry has a breeding
    section that records a maturation time."""
    entry = creature_database[database_id]
    if "breeding" not in entry:
        return False
    return "maturationtime" in entry["breeding"]

def get_breedable_comment(params:dict)->str:
    """Return the breeding remark from the infobox params, or "" when absent.

    FIX: the return annotation was `bool`, but the function always returns a
    string.
    """
    if "breedable comment" in params:
        return params["breedable comment"]["wt"]
    return ""

def get_breeding(database_id):
    """Assemble the breeding info dict (egg item, incubation temperature
    window, and the various timings) from the creature database entry."""
    source = creature_database[database_id]["breeding"]
    breeding = {}
    if "egg" in source:
        # only the first egg name before any comma is used
        egg = source["egg"].split(",",maxsplit=1)[0]
        if egg != "":
            breeding["egg"] = idmapper.get_id(egg)
    if "mintemp" in source:
        breeding["minTemperature"] = source["mintemp"]
        breeding["maxTemperature"] = source["maxtemp"]
    for src_key, dst_key in (("incubationtime","incubationTime"),("gestationtime","gestationTime")):
        if src_key in source:
            breeding[dst_key] = source[src_key]
    breeding["maturationTime"] = source["maturationtime"]
    breeding["minInterval"] = source["mintimebetweenmating"]
    breeding["maxInterval"] = source["maxtimebetweenmating"]
    return breeding

def get_groups(database_id)->list:
    """Return the creature's group names as a list (empty when unset)."""
    raw = creature_database[database_id].get("group","")
    return raw.split(", ") if raw != "" else []

def get_drag_weight(database_id)->float:
    """Drag weight of the creature; 0 when the database has no value."""
    entry = creature_database[database_id]
    return entry.get("dragweight",0)

def get_base_xp(database_id)->float:
    """Base experience granted for killing the creature; 0 when unset."""
    entry = creature_database[database_id]
    return entry.get("killxpbase",0)

def get_equipments(database_id)->list:
    """Items (e.g. saddles) the creature can equip, as item ids."""
    entry = creature_database[database_id]
    if "taming" not in entry:
        return []
    raw = entry["taming"].get("equipment","")
    if raw == "":
        return []
    return [idmapper.get_id(name) for name in raw.split(", ")]

def get_skins(soup:BeautifulSoup)->list:
    """Skins the creature can wear, read from the "Skins" infobox section."""
    anchor = soup.find("a",text="Skins")
    if anchor is None:
        return []
    container = anchor.parent.find_next_sibling()
    skins = []
    for link in container.select("a"):
        title = link.get("title")
        if title is not None:
            skins.append(idmapper.get_id(title))
    return skins

def get_drops(database_id)->list:
    """Item ids that can be harvested from or are dropped by the creature."""
    entry = creature_database[database_id]
    names = entry.get("canbeharvestedfor","").split(", ")
    names += entry.get("specialloot","").split(", ")
    return [idmapper.get_id(name) for name in names if name != ""]

def get_loots(database_id)->list:
    """Special loot items dropped by the creature, as item ids.

    FIX: the original looked up "lootitems" on the whole creature_database
    dict instead of on this creature's entry, so it always returned [].
    """
    loots = creature_database[database_id].get("lootitems","")
    if loots == "":
        return []
    return [idmapper.get_id(item) for item in loots.split(", ")]

def get_weight_reductions(soup:BeautifulSoup)->list:
    """Parse the "Weight Reduction" section into a list of
    {"id": item, "value": reduction} dicts; [] when the section is absent."""
    heading = soup.find("h3",id="Weight_Reduction")
    if heading is None:
        return []
    # the template parameters are embedded as JSON in the span's data-mw attribute
    span = heading.find_next_sibling("span",typeof="mw:Transclusion")
    params = json.loads(span.get("data-mw"))["parts"][0]["template"]["params"]
    return [{"id": item_id, "value": item["wt"]} for item_id, item in params.items()]

def get_trapped_by(database_id)->list:
    """Trap item ids that can immobilize the creature."""
    raw = creature_database[database_id].get("immobilizedby","")
    if raw == "":
        return []
    return [idmapper.get_id(name) for name in raw.split(", ")]

def get_can_damage(database_id)->list:
    """Structure tiers the creature can damage, as "<Tier>_Structures" ids."""
    raw = creature_database[database_id].get("candamage","")
    if raw == "":
        return []
    return [f"{tier}_Structures" for tier in raw.split(", ")]

def get_carryable_by(database_id)->list:
    """Creature ids able to pick this creature up (deduplicated, order kept)."""
    carriers = []
    names = creature_database[database_id].get("carryableby","").split(", ")
    for name in names:
        if name == "":
            continue
        carrier_id = idmapper.get_id(name)
        if carrier_id not in carriers:
            carriers.append(carrier_id)
    return carriers

def get_spawn_commands(soup:BeautifulSoup)->list:
    """Extract every spawn command from the infobox's collapsible section.

    Returns a list of dicts with a "name" key (variant name, "" for the base
    form) plus "entityId" and/or "blueprintId".

    FIX: the return annotation was `dict` but the function returns a list.
    FIX: the inner loop only advanced `i` for "copy-clipboard" spans, so any
    other classed span caused an infinite loop.
    """
    info_framework=soup.find("div",class_="info-framework")
    # span tags inside the collapsible command section
    spawn_command_spans=info_framework.select(".mw-collapsible-content>span")
    if len(spawn_command_spans)==0:
        return []
    spawn_commands=[]
    i=0
    while i<len(spawn_command_spans):
        command_id={}
        # an unclassed span carries the variant name
        if spawn_command_spans[i].get("class") is None:
            command_id["name"] = spawn_command_spans[i].get_text(strip=True).replace("Variant ","")
            i+=1
        else:
            command_id["name"] = ""
        # consume the classed spans that follow; "copy-clipboard" ones hold commands
        while i<len(spawn_command_spans) and spawn_command_spans[i].get("class") is not None:
            if spawn_command_spans[i].get("class")[0] == "copy-clipboard":
                command = spawn_command_spans[i].get_text()
                if "Blueprint" in command:
                    # strip the leading "cheat SpawnDino " and trailing arguments
                    command_id["blueprintId"] = command[16:-11]
                else:
                    # strip the leading "cheat summon "
                    command_id["entityId"] = command[13:]
            # always advance, even past classed spans that hold no command
            i+=1
        spawn_commands.append(command_id)
    return spawn_commands

def get_stats(soup:BeautifulSoup)->list:
    """Parse the "Base Stats and Growth" section into one stat block per
    variant, each holding attribute values and movement speeds.

    Also queues downloads for any stat icons not yet present locally.
    Raises ValueError when the variant names and table counts disagree or a
    speed table has an unexpected column count.
    """
    stats = []
    h3 = soup.find("h3",id="Base_Stats_and_Growth")
    if h3 is None:
        return []
    tag = h3.find_next_sibling()
    if tag is None:
        return []
    elif tag.name in ("p","ul","span"):
        # skip an intervening paragraph/list/span before the stat tables
        tag = tag.find_next_sibling()
    elif tag.name in ("h1","h2","h3","h4"):
        # next heading follows immediately: the section is empty
        return []
    elif tag.name == "div" and tag.get("style") == "padding-bottom:5px;padding-left:25px":
        # the Unicorn shares the Equus stats, so its page lists none
        return []
    # extract the names of every variant (tabber tabs); a single untabbed
    # table means just the default form
    if tag.get("class") is None:
        names=["default"]
    else:
        names=[a.get_text(strip=True) for a in tag.select(".wds-tabs__wrapper a")]
    # stat tables
    stat_tables = soup.find_all("table",attrs={"data-description":"Base Stats and Growth"})
    if len(stat_tables)==0:
        stat_tables = tag.select("table")
    # movement speed tables
    speed_tables = soup.find_all("table",attrs={"data-description":"Movement Speed"})
    if len(speed_tables)==0:
        speed_tables = tag.find_next_sibling().select("table")
    if len(names)!=len(stat_tables) or len(stat_tables)!= len(speed_tables):
        raise ValueError("种类名称的数量与状态数据的数量不对应")
    for i in range(len(names)):
        stat = {}
        stat["id"] = f"stats{i}"
        stat["name"] = names[i]
        # collect the attribute values
        attributes = {}
        for tr in stat_tables[i].select("tr"):
            attribute = {}
            tds = tr.select("th[style='text-align: left'],td")
            if len(tds)==0:
                continue
            # attribute name
            name = tds[0].get_text(strip=True)
            attribute["name"] = name
            # attribute icon
            img = tds[0].img
            img_name = img.get("data-image-key")
            if img_name is None:
                img_name = img.get("resource")
                img_name = img_name.replace("./File:","")
            attribute["icon"] = img_name
            if img_name not in stats_files:
                src = img.get("data-src")
                if src is None:
                    src = img.get("src")
                src = src.split("/revision/",maxsplit=1)[0]
                downloader.add_task(src, stats_path/img_name)
                stats_files.add(img_name)
            # the camel-cased attribute name doubles as its id and dict key
            key=utils.to_camel(name)

            # base value of the attribute
            value = tds[1].get_text(strip=True)
            if value == "N/A":
                attribute["base"] = 0
            elif value == "?":
                attribute["base"] = 0
            else:
                value = value.split("/")[0]
                value = value.replace("%","")
                attribute["base"] = float(value)
            # per-level gain while wild
            value = tds[2].get_text(strip=True)
            if value == "N/A":
                attribute["wildIncrease"] = 0
            elif value == "?":
                attribute["wildIncrease"] = 0
            else:
                value = value.split("/")[0]
                value = value.replace("%","")
                attribute["wildIncrease"] = float(value)
            # for melee damage / movement speed / charge emission range, the
            # value and gain are converted to percentages (100% stored as 100)
            if key in ["meleeDamage","movementSpeed","chargeEmissionRange"]:
                if attribute["base"]==0:
                    attribute["wildIncrease"] = 0
                else:
                    attribute["wildIncrease"] = attribute["wildIncrease"]/attribute["base"]*100
                attribute["base"]=100
            # untameable creatures have no tamed columns; skip the rest
            if len(tds)<4:
                continue
            # per-level gain once tamed
            value = tds[3].get_text(strip=True)
            if value == "N/A":
                attribute["tamedIncrease"] = 0
            elif value == "?":
                attribute["tamedIncrease"] = 0
            else:
                value = value.replace("+","")
                value = value.replace("%","")
                attribute["tamedIncrease"] = float(value)
            # taming additive bonus (Ta)
            attribute["tamedAddition"] = tds[4].get_text(strip=True)
            # taming multiplicative bonus (Tm)
            attribute["tamedMultiplication"] = tds[5].get_text(strip=True)

            # drop the attribute when both base and wild gain are 0
            if attribute["base"]==0 and attribute["wildIncrease"]==0:
                continue
            attribute["id"] = key
            attributes[key]=attribute
        stat["attributes"] = attributes
        # collect the movement speeds
        speeds = {}
        for tr in speed_tables[i].select("tr"):
            tds = tr.select("th[style='text-align:left'],td")
            if len(tds)==0:
                continue
            speed = {}
            speed["name"]= tds[0].get_text(strip=True)
            key = utils.to_camel(speed["name"])
            speed["id"] = key
            speeds[key] = speed
            speed["wildBase"] = tds[1].get_text(strip=True) # base speed while wild
            if len(tds)==8:
                speed["tamedBase"] = tds[2].get_text(strip=True) # base speed once tamed
                speed["wildSprinting"] = tds[4].get_text(strip=True) # sprint speed while wild
                speed["tamedSprinting"] = tds[5].get_text(strip=True) # sprint speed once tamed
                speed["staminaCost"] = tds[7].get_text(strip=True)  # stamina drain while sprinting
            elif len(tds) == 3:
                speed["wildSprinting"] = tds[2].get_text(strip=True) # sprint speed while wild
            elif len(tds) == 12:
                speed["tamedBase"] = tds[2].get_text(strip=True)
                speed["riddenBase"] = tds[4].get_text(strip=True) # base speed while ridden
                speed["wildSprinting"] = tds[6].get_text(strip=True)
                speed["tamedSprinting"] = tds[7].get_text(strip=True)
                speed["riddenSprinting"] = tds[9].get_text(strip=True)  # sprint speed while ridden
                speed["staminaCost"] = tds[11].get_text(strip=True)
            else:
                raise ValueError(f"未知的表格格式,列数{len(tds)}")
            # convert the textual values to floats (0 when unparsable)
            for key,value in speed.items():
                if key in ("name","id"):
                    continue
                try:
                    speed[key] = float(value)
                except:
                    speed[key] = 0
        stat["speeds"] = speeds
        stats.append(stat)
    return stats

def get_attacks(soup:BeautifulSoup)->list:
    """Extract the creature's attack data (name, key binds, stamina cost,
    descriptions, damage) from either the new-style attack box or the legacy
    attack tables."""
    attacks = []
    # new-style format; currently used by Wyverns and the recently revamped Megalosaurus
    rows = soup.select(".dino-attack-info tr")
    if len(rows)>0:
        for row in rows:
            attack={}
            attack["name"] = row.th.big.text
            # collect the key binds
            keys = []
            tags = row.th.select(".dino-attack-keybinds>*")
            for tag in tags:
                key={}
                if tag.name=="span":
                    key["name"] = tag.text
                    key["platform"] = "PC"
                elif tag.name in ("figure-inline","figure","a"):
                    img = tag.img
                    if img is None:
                        continue
                    key["name"] = img.get("alt")
                    # work out the platform from the bind image
                    if key["name"] in ("Left","Right"):
                        key["platform"] = "pc"
                    else:
                        img_key = img.get("data-image-key")
                        if img_key is None:
                            resource = img.get("resource")
                            img_key=resource.replace("./File:","")
                        key["platform"] = img_key.split("_",maxsplit=1)[0]
                elif tag.name == "br":
                    continue
                else:
                    raise NotImplementedError(f"未知的标签名:{tag.name}")
                keys.append(key)
            attack["keys"] = keys
            # stamina cost of the attack
            div = row.th.find("div",style="margin-top: 4px")
            if div is None:
                attack["staminaCost"] = 0
            else:
                attack["staminaCost"] = div.a.get_text(strip=True)
            # the attack's description bullet points
            attack["descriptions"] = [li.text.strip() for li in row.td.select("li")]
            # the attack's damage: first <b> tag mentioning "... damage"
            b_list = row.td.find_all("b",text=re.compile(r".*\d*\.?\d* damage.*"))

            for b in b_list:
                match=re.search(r"(\d+\.?\d*).*damage", b.get_text())
                if match is not None:
                    attack["damage"] = float(match.group(1))
                    break
            # store this attack
            attacks.append(attack)
        return attacks
    # legacy format: a pile of very complicated tables
    h3 = soup.find("span",id="Base_Stats_and_Growth")
    if h3 is not None:
        div = h3.parent.find_next_sibling()
        # when variant attack data is also listed, narrow the search to the
        # first tab (the base form) only
        if div.get("class") is not None and div.get("class")[0] == "tabber":
            soup=div.find("div",class_="wds-tab__content")
    tables = soup.find_all("table",attrs={"data-description":"Attacks"})
    if len(tables)>0:
        for table in tables:
            attack = {}
            rows = table.select("tr")
            # the first row holds the attack name
            attack["name"]=rows[0].th.text
            # the second row holds the attack description
            attack["descriptions"] = [rows[1].td.text]
            # the second row also holds the key binds
            keys=[]
            key_tag = rows[1].td.select("a[class='image'],kbd") # tags carrying the binds
            for tag in key_tag:
                key={}
                if tag.name=="a":
                    key["name"] = tag.get("title")
                else:
                    key["name"] = tag.get_text()
                # the platform name follows in parentheses, e.g. "(PC)"
                string = tag.next_sibling.string
                match = re.match(".*\((.*)\).*", string)
                key["platform"]= match.group(1)
                keys.append(key)
            attack["keys"] = keys
            # the third row holds the stamina cost
            tds = rows[2].select("td")
            attack["staminaCost"]=tds[0].text
            # when a sixth row exists, its first td holds the attack damage
            if len(rows)>=6:
                if rows[5].td is None:
                    attack["damage"] = 0
                elif rows[5].td.get_text(strip=True) == "":
                    attack["damage"] = 0
                else:
                    attack["damage"] = float(rows[5].td.get_text(strip=True))
            attacks.append(attack)
        return attacks
    return attacks

def get_filename(text:str):
    """Pull the file name out of wiki markup shaped like
    "[[File:Spawning Map.svg|link=|380px]]"."""
    return re.match(r"^\[\[File:([^\|]*).*\]\]$", text).group(1)

def format_habitats_name(name:str)->str:
    """Normalize a species/map label from tabber markup: keep the part after
    the last "|-|" separator, drop "=" signs, and trim whitespace."""
    tail = name.split("|-|")[-1]
    return tail.replace("=", "").strip()

def get_habitats(soup:BeautifulSoup)->list:
    """Parse the spawn-location tabber into a list of habitats, one per
    species/variant, each carrying per-map spawning data.

    Queues downloads for any map images and spawn-overlay SVGs missing locally.
    """
    # the outermost spawn-info container
    div = soup.find("div",class_="info-spawn")
    if div is None:
        return []
    # the div holding the tabber data
    div = div.find("div",typeof="mw:Extension/tabber")
    if div is None:
        return []
    # the tab contents are embedded as JSON inside the data-mw attribute
    data_mw=div.get("data-mw")
    data = json.loads(data_mw)
    html = data["body"]["extsrc"]
    soup = BeautifulSoup(html,"lxml")
    habitats = []
    spawning_divs = soup.select(".spawningMap")
    if len(spawning_divs)==0:
        spawning_divs = [soup]
    for spawning_div in spawning_divs:
        habitat = {}
        element = spawning_div.previous_sibling
        # a classed tag right before the map block marks an event-only creature
        if type(element) is Tag and element.get("class") is not None:
            habitat["isEventCreature"] = True
            element = element.previous_sibling
        else:
            habitat["isEventCreature"] = False
        # read the species name from the preceding sibling
        if element is None:
            species = ""
        elif type(element) is Tag:
            species = element.text
        else:
            species = element.string
        # normalize the species name
        habitat["species"] = format_habitats_name(species)
        habitat["spawningData"] = []
        for map_container in spawning_div.select(".noviewer"):
            spawning_data = {}
            # the map's name precedes its container
            map_name = map_container.previous_sibling
            spawning_data["mapName"] = format_habitats_name(map_name.string)
            
            # the map's image file name
            div = map_container.find("div",class_="spawningMap-map")
            map_file = get_filename(div.text)
            spawning_data["mapFile"] = map_file
            if map_file not in spawning_maps:
                src = filemapper.get_file_src(map_file)
                downloader.add_task(src, spawning_maps_path/map_file)
                spawning_maps.add(map_file)
            
            # the spawn-overlay SVG file name
            div = map_container.find("div",class_="svgCreatureMap")
            svg = get_filename(div.text)
            svg = svg.replace(":","")
            spawning_data["spawningSvg"] = svg
            if svg not in spawning_svgs:
                src = filemapper.get_file_src(svg)
                downloader.add_task(src, spawning_svgs_path/svg)
                spawning_svgs.add(svg)
            
            habitat["spawningData"].append(spawning_data)

        habitats.append(habitat)
    return habitats

def get_gallery(soup:BeautifulSoup)->list:
    """Collect gallery photos as {"file", "caption"} dicts; queue downloads
    for any images not yet present locally."""
    gallery = []
    for box in soup.select(".gallery>.gallerybox"):
        img = box.img
        # derive the filename from the resource attribute
        file_name = img.get("resource").replace("./File:","").replace("_"," ")
        photo = {"file": file_name}
        if file_name not in gallery_files:
            url = img.get("data-src")
            if url is None:
                url = img.get("src")
            url = url.split("/revision/",maxsplit=1)[0]
            downloader.add_task(url, gallery_path/file_name)
            gallery_files.add(file_name)
        # the photo's caption text
        caption_div = box.find("div",class_="gallerytext")
        photo["caption"] = caption_div.text.strip()
        gallery.append(photo)
    return gallery

def get_changelogs(soup:BeautifulSoup)->list:
    """Parse the Changelog table into [{"patch": ..., "changes": [...]}, ...]."""
    heading = soup.find("h2",id="Changelog")
    if heading is None:
        return []
    changelogs = []
    for row in heading.find_next_sibling().select("tr"):
        cells = row.select("td")
        if not cells:
            # header rows carry no td cells
            continue
        changes = cells[1].text.replace("\xa0","").strip()
        changelogs.append({
            "patch": cells[0].get_text().strip(),
            "changes": changes.split("\n"),
        })
    return changelogs

def get_trivia(soup:BeautifulSoup)->list:
    """Extract the "Notes/Trivia" bullets; the first line of each bullet is
    the content, any following lines are nested notes.
    (trivia is the plural; the singular is trivium)"""
    heading = soup.find("h2",id="Notes/Trivia")
    if heading is None:
        heading = soup.find("h2",id="Notes")
    if heading is None:
        return []
    bullet_list = heading.find_next_sibling("ul")
    if bullet_list is None:
        return []
    trivia = []
    for item in bullet_list.find_all("li",recursive=False):
        lines = item.text.split("\n")
        trivia.append({"content": lines[0], "notes": lines[1:]})
    return trivia

def get_dossier_texts(soup:BeautifulSoup)->list:
    """Extract the dossier (field-note) texts from the "Basic Info" section.

    Handles both the single-transclusion layout and the tabber layout; returns
    one dict per variant, skipping entries that carry no data beyond a name.
    """
    h2 = soup.find("h2",id="Basic_Info")
    if h2 is None:
        return []
    section = h2.find_next_sibling("section")
    p = section.find(typeof="mw:Transclusion")
    if p is not None:
        # single dossier: template params live in the data-mw JSON
        params = json.loads(p.get("data-mw"))["parts"][0]["template"]["params"]
        data = {"": {key: value["wt"] for key, value in params.items()}}
    else:
        # multiple dossiers: one tab per variant inside a tabber extension
        div = section.find("div",typeof="mw:Extension/tabber")
        if div is None:
            return []
        tabber_text = json.loads(div.get("data-mw"))["body"]["extsrc"]
        data = wiki_decoder.decode_tabber_text(tabber_text)
    # sticky-note fields followed by the note-body fields, in output order
    copied_fields = (
        "time","temperament","diet","modelNumber","height","weight",
        "threatLevel","primaryFunction",
        "observations","history","wild","info","domesticated",
    )
    dossier_texts = []
    for variant_name, value in data.items():
        entry = {"name": variant_name}
        # the species doubles as the formal name
        if "species" in value:
            entry["species"] = value["species"]
            entry["formalName"] = value["species"]
        for field in copied_fields:
            if field in value:
                entry[field] = value[field]
        # numbered heading/body pairs (head1/info1 .. head4/info4)
        for i in range(1,5):
            for field in (f"head{i}", f"info{i}"):
                if field in value:
                    entry[field] = value[field]
        # only keep entries that carry data beyond the name
        if len(entry) > 1:
            dossier_texts.append(entry)
    return dossier_texts

def scrap_one_creature(wiki_id,update_icon=False,update_dossier=False,update_sound=False)->dict:
    """Scrape a single creature page and assemble its full data dict.

    NOTE(review): the early `return {"id":""}` below makes everything after
    the caching step dead code — this looks like temporary scaffolding for
    toggling between a "download & cache" run and a "parse from cache" run;
    confirm which mode is intended before shipping.
    """
    url = f"https://services.fandom.com/visual-editor-parsoid-facade/fandomdesktop/ark.fandom.com/v1/page/html/{wiki_id}"
    resp = session.get(url)
    soup = BeautifulSoup(resp.content,"lxml")
    ##################
    # cache the fetched page locally
    with open(f"data/pages/{wiki_id}","wb") as f:
        f.write(resp.content)
    return {"id":""}
    ##################
    # read the locally cached page
    with open(f"data/pages/{wiki_id}","rb") as f:
        content = f.read()
    soup = BeautifulSoup(content,"lxml")
    ##################

    # a hidden div whose data-mw attribute stores basic creature data as JSON
    div = soup.find("div",style="display:none;",typeof="mw:Transclusion")
    data_mw=div.get("data-mw")
    data=json.loads(data_mw)
    params=data["parts"][0]["template"]["params"]

    # this creature's id within the wiki creature database
    database_id = get_database_id(wiki_id)

    creature = {}
    # the creature's id
    id = idmapper.format_wiki_id(wiki_id)
    creature["id"] = id
    # name, English name and aliases
    creature["name"] = get_name(soup)
    creature["nameEn"] = creature["name"]
    creature["aliases"] = ""
    # icon
    creature["icon"] = get_icon(soup, update_icon)
    # dossier images
    creature["dossiers"] = get_dossiers(soup,update_dossier)
    # call sound
    creature["sound"] = get_sound(soup,update_sound)
    # whether the creature can be tamed
    creature["tameable"] = get_tameable(params)
    # whether the creature can be ridden
    creature["rideable"] = get_rideable(params)
    # whether it can be ridden without a saddle
    creature["noSaddleNeeded"] = get_no_saddle_needed(soup)
    # whether mounted weapons are allowed
    creature["riderWeaponary"]=get_rider_weaponary(database_id)
    # whether it is immune to radiation
    creature["radiationImmune"] = get_radiation_immune(database_id)
    # whether it can be bred (breedable)
    creature["breedable"] = get_breedable(database_id)
    # when breedable, gather the breeding-related info
    if creature["breedable"]:
        creature["breedableComment"] = get_breedable_comment(params)
        creature["breeding"] = get_breeding(database_id)
    # groups the creature belongs to
    creature["groups"] = get_groups(database_id)
    # drag weight
    creature["dragWeight"] = get_drag_weight(database_id)
    # base kill experience
    creature["baseXP"] = get_base_xp(database_id)
    # items the creature can equip
    creature["equipments"] = get_equipments(database_id)
    # skins the creature can wear
    creature["skins"] = get_skins(soup)
    # drops
    creature["drops"] = get_drops(database_id)
    # special loot
    creature["loots"] = get_loots(database_id)
    # resource weight reductions
    creature["weightReductions"] = get_weight_reductions(soup)
    # traps that can immobilize it
    creature["trappedBy"] = get_trapped_by(database_id)
    # structures it can damage
    creature["canDamage"] = get_can_damage(database_id)
    # creatures that can carry it
    creature["carryableBy"] = get_carryable_by(database_id)
    # all spawn commands
    creature["spawnCommands"]=get_spawn_commands(soup)
    # stat information
    creature["stats"] = get_stats(soup)
    # attacks
    creature["attacks"] = get_attacks(soup)
    # spawn-location data
    creature["habitats"] = get_habitats(soup)
    # gallery photos
    creature["gallery"] = get_gallery(soup)
    # changelog entries
    creature["changelogs"] = get_changelogs(soup)
    # trivia
    creature["trivia"] = get_trivia(soup)
    # dossier texts
    creature["dossierTexts"] = get_dossier_texts(soup)
    return creature

def scrap_all_creatures(translate=True,update_icon=False,update_dossier=False,update_sound=False):
    """Scrape every creature page, optionally run translation, save the data,
    then start downloading the queued asset files."""
    # collect all creature page titles
    titles = get_titles()
    # convert titles to wiki ids, dropping duplicates that redirect to one page
    wiki_ids = []
    for title in titles:
        # FIX: the original used `in ("Alpha Creatures")` — parentheses around
        # a lone string are not a tuple, so this was a substring test that
        # also skipped titles like "Alpha", "Creatures", "ure", etc.
        if title in ("Alpha Creatures",):
            continue
        wiki_id = idmapper.get_wiki_id(title)
        if wiki_id not in wiki_ids:
            wiki_ids.append(wiki_id)

    ########################
    # cache the wiki ids locally
    with open("data/scraper/creature_ids.json","w") as f:
        json.dump(wiki_ids,f)
    ########################
    # read back the locally cached wiki ids
    with open("data/scraper/creature_ids.json","r") as f:
        wiki_ids = json.load(f)
    #######################

    creatures = {}
    for i, wiki_id in enumerate(wiki_ids):
        print(f"正在请求第{i}条数据:{wiki_id}")
        creature = scrap_one_creature(wiki_id,update_icon=update_icon,update_dossier=update_dossier,update_sound=update_sound)
        creatures[creature["id"]] = creature
    if translate:
        translator.translate_creatures(creatures,update_translations=True)
    utils.save_data(creatures, "data/creatures.json")
    downloader.start_download()

def update_translations():
    """Refresh the translations of the saved creature data file in place."""
    data = utils.read_data("data/creatures.json")
    translator.translate_creatures(data,update_translations=False)
    utils.save_data(data, "data/creatures.json")

def add_custom_data():
    """Merge the hand-maintained custom data (custom-data/creatures.json) into
    the scraped creature data and copy over any new custom gallery images."""
    # load the scraped data
    creatures = utils.read_data("data/creatures.json")
    # load the custom data
    custom_data = utils.read_data("custom-data/creatures.json")
    # fold the custom fields into each creature
    for creature_id, data in custom_data.items():
        creature = creatures[creature_id]
        # pinyin reading of the name
        creature["pinyin"] = data["pinyin"]
        # aliases
        if "aliases" in data:
            creature["aliases"] = data["aliases"]
        # gallery: custom photos first, then scraped photos not already listed
        if "gallery" in data:
            merged = data["gallery"]
            for photo in creature["gallery"]:
                if photo not in merged:
                    merged.append(photo)
            creature["gallery"] = merged
        # introduction video
        if "introduceVideo" in data:
            creature["introduceVideo"] = data["introduceVideo"]
        # popular-science video
        if "sciencePopularizingVideo" in data:
            creature["sciencePopularizingVideo"] = data["sciencePopularizingVideo"]
    # copy custom gallery files that are not present yet
    import shutil
    for file in (Path("custom-data")/gallery_path).glob("*"):
        if file.name not in gallery_files:
            shutil.copy2(file, gallery_path)
    # persist the merged data
    utils.save_data(creatures, "data/creatures.json")