from requests import Session
from bs4 import BeautifulSoup
import re
import json
import utils

# Shared HTTP session used by every scraper function below.
session = Session()
# Ignore proxy/cert settings inherited from the environment (HTTP_PROXY etc.).
session.trust_env = False
# NOTE(review): plain assignment replaces requests' default headers entirely
# (Accept-Encoding, Connection, ...) instead of merging — presumably intended
# to present only a browser-like User-Agent; confirm before changing.
session.headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67"}

def decode(data_text: str):
    """Parse a Lua table literal (as served by the wiki's Module pages) by
    massaging it into valid JSON.

    Args:
        data_text: Raw Lua module source, e.g. 'return {\\n["key"] = 1,\\n}'.

    Returns:
        The parsed data (typically a dict).

    Raises:
        json.JSONDecodeError: if the massaged text is still not valid JSON.
    """
    # Strip Lua line comments ("-- ..."), keeping the line break.
    data_text = re.sub(r"--.*\n", "\n", data_text)
    # Drop only the leading "return" keyword. (A blanket str.replace would
    # also mangle the word "return" occurring inside string values.)
    data_text = re.sub(r"^\s*return\b", "", data_text)
    # Remove trailing commas before a closing brace OR bracket — both are
    # legal in Lua but illegal in JSON. Brackets appear after the caller
    # rewrites Lua set syntax {...} into JSON array syntax [...].
    data_text = re.sub(r",\s*([}\]])", r"\1", data_text)
    # Lua's ["key"] = value  ->  JSON's "key" : value
    data_text = data_text.replace('["', '"')
    data_text = data_text.replace('"] = ', '" : ')
    return json.loads(data_text)

def scrape_taming_creatures():
    """Fetch the creature taming table from the ARK wiki and save it as JSON.

    Downloads the Lua module source, rewrites the "eats" fields from Lua
    set syntax ({...}) to JSON array syntax ([...]), decodes the result,
    and writes it to data/scraper/taming-creatures.json.
    """
    page = session.get("https://ark.fandom.com/wiki/Module:TamingTable/creatures")
    soup = BeautifulSoup(page.content, "lxml")
    raw_text = soup.select("pre")[-1].text
    # Fix the data line by line: only "eats" lines carry brace-delimited sets.
    fixed_lines = []
    for line in raw_text.split("\n"):
        if "eats" in line:
            line = line.replace("{", "[ ").replace("}", "]")
        fixed_lines.append(line)
    taming_data = decode("\n".join(fixed_lines) + "\n")
    utils.save_data(taming_data, "data/scraper/taming-creatures.json")

def scrape_taming_food():
    """Fetch the taming food table from the ARK wiki and save it as JSON."""
    page = session.get("https://ark.fandom.com/wiki/Module:TamingTable/food")
    source_text = BeautifulSoup(page.content, "lxml").find("pre").text
    utils.save_data(decode(source_text), "data/scraper/taming-foods.json")

def scrape_creature_database():
    """Fetch the creature database module from the ARK wiki and save it as JSON."""
    page = session.get("https://ark.fandom.com/wiki/Module:Dv/data")
    source_text = BeautifulSoup(page.content, "lxml").find("pre").text
    # Decode the Lua table and persist it for downstream tooling.
    utils.save_data(decode(source_text), "data/scraper/creature-database.json")

def scrap_creature_aliases():
    """Fetch the creature alias table from the ARK wiki and save it as JSON.

    NOTE(review): the name looks like a typo for "scrape_creature_aliases";
    kept as-is so existing callers keep working.
    """
    page = session.get("https://ark.fandom.com/wiki/Module:Dv/aliases")
    source_text = BeautifulSoup(page.content, "lxml").find("pre").text
    # This module uses single-quoted Lua strings; JSON requires double quotes.
    aliases = decode(source_text.replace("'", '"'))
    utils.save_data(aliases, "data/scraper/creature-aliases.json")