# Standard library
import json
import re
import urllib
from pathlib import Path

# Third-party
from bs4 import BeautifulSoup
from requests import Session
from requests.adapters import HTTPAdapter

# Local modules
import downloader
import idmapper
import translator
import utils

# Shared HTTP session used for every wiki request.
session = Session()
session.trust_env = False  # ignore proxy/cert settings from environment variables
# BUG FIX: the original set `session.adapters.DEFAULT_RETRIES = 10`, which only
# attaches an unused attribute to the adapter registry (a plain mapping) and
# never enables retries.  Retries must be configured on a mounted HTTPAdapter.
_retry_adapter = HTTPAdapter(max_retries=10)
session.mount("https://", _retry_adapter)
session.mount("http://", _retry_adapter)
# Pretend to be a desktop browser so the wiki serves the normal page markup.
session.headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67"}

# Directory where item icon files are stored.
item_path = Path("img/items")
# Names of the icon files already on disk (used to skip re-downloads).
item_imgs = {file.name for file in item_path.glob("*")}

def get_info_frameworks(soup:BeautifulSoup):
    """ A single page may describe several items; each item lives in its own
    ``info-framework`` element.  Return all of them. """
    frameworks = soup.select(".info-framework")
    return frameworks

def get_imgs(soup:BeautifulSoup,update_icon:bool)->list:
    """ Collect the item's icon image names and queue any missing (or
    explicitly refreshed) icon files for download.

    Returns the list of icon file names referenced by the page. """
    info_div = soup.find("div", class_="info-nodescquotes")  # div holding the item info
    first_child = info_div.next_element
    names = []
    for image in first_child.select("img"):
        name = image.get("data-image-name")
        names.append(name)
        # Download only when the file is missing or a refresh was requested.
        if name not in item_imgs or update_icon:
            url = image.get("data-src")
            if url is None:
                url = image.get("src")
            # Strip the "/revision/..." suffix to get the raw file URL.
            url = url.split("/revision/", maxsplit=1)[0]
            downloader.add_task(url, item_path / name)
            item_imgs.add(name)
    return names

def get_name(soup:BeautifulSoup)->str:
    """ Return the item's display name from the page masthead. """
    masthead = soup.find("div", class_="info-masthead")
    return masthead.get_text(strip=True)

def get_description(soup:BeautifulSoup)->str:
    """ Return the item's flavour-text description, or "" when absent. """
    quote_cell = soup.find("td", class_="quote-left")
    if quote_cell is None:
        return ""
    # The description sits in the cell next to the quote marker;
    # drop newlines and surrounding whitespace.
    raw = quote_cell.find_next_sibling().get_text()
    return raw.replace("\n", "").strip()

def get_type(soup:BeautifulSoup)->str:
    """ Return the item's type label, defaulting to "Other" when missing. """
    label = soup.find("div", text=" Type\n  ")
    if label is None:
        return "Other"
    return label.find_next_sibling().get_text(strip=True)

def get_weight(soup:BeautifulSoup)->tuple:
    """ Return the item's (pc, mobile) weights.

    Falls back to (0, 0) when the page has no weight row; the mobile
    weight defaults to the PC weight when only one value is listed.

    BUG FIX: the original annotation ``->(float,float)`` is not a valid type
    annotation (it evaluates to a tuple of classes); annotate ``tuple``.
    """
    label = soup.find("div", text=" Weight\n\t\t  ")
    if label is None:
        return 0, 0
    parts = label.find_next_sibling().get_text(strip=True).split("/")
    pc = float(parts[0])  # PC-version weight
    # Mobile weight; defaults to the PC value when no second entry exists.
    mobile = float(parts[1]) if len(parts) > 1 else pc
    return pc, mobile

def get_stack_size(soup:BeautifulSoup)->tuple:
    """ Return the item's (pc, mobile) stack sizes, defaulting to (1, 1).

    BUG FIX: the original annotation ``->(int,int)`` is not a valid type
    annotation (it evaluates to a tuple of classes); annotate ``tuple``.
    """
    span = soup.find("span", text="Stack size")
    if span is None:
        return 1, 1
    parts = span.parent.find_next_sibling().get_text(strip=True).split("/")
    pc = int(parts[0])  # PC-version stack size
    # Mobile stack size; defaults to the PC value when no second entry exists.
    mobile = int(parts[1]) if len(parts) > 1 else pc
    return pc, mobile

def get_added_in(soup:BeautifulSoup)->str:
    """ Return the game version that introduced the item ("unknown" if absent). """
    label = soup.find("div", text=" Added in\n\t\t  ")
    if label is None:
        return "unknown"
    return label.find_next_sibling().get_text(strip=True)

def get_spawn_commands(soup:BeautifulSoup)->list:
    """ Parse the item's console spawn commands.

    Returns a list of dicts, one per variant, each holding the variant
    "name" plus whichever of entityId / itemId / blueprintId the page lists.
    (BUG FIX: the original was annotated ``->dict`` but always returns a list.)

    Raises:
        NotImplementedError: when an unrecognised command format appears.
    """
    spans = soup.select(".info-unit .mw-collapsible-content>span")  # spans inside the command box
    if len(spans) == 0:
        return []
    spawn_commands = []
    i = 0
    while i < len(spans):
        command_ids = {}
        # A span without a class attribute carries the variant name.
        if spans[i].get("class") is None:
            command_ids["name"] = spans[i].get_text().replace("Variant ", "")
            i += 1
        else:
            command_ids["name"] = ""
        # Classed spans carry the actual console commands.
        while i < len(spans) and spans[i].get("class") is not None:
            if spans[i].get("class")[0] == "copy-clipboard":
                command = spans[i].get_text()
                # Slices strip the command prefix and trailing quantity args.
                if "cheat gfi " in command:
                    command_ids["entityId"] = command[10:-6]
                elif "cheat giveitemnum " in command:
                    command_ids["itemId"] = command[18:-6]
                elif "cheat giveitem " in command:
                    command_ids["blueprintId"] = command[15:-6]
                elif "cheat unlockengram " in command:
                    command_ids["blueprintId"] = command[19:]
                else:
                    raise NotImplementedError("不支持的命令格式")
            # BUG FIX: advance unconditionally.  The original only incremented
            # i for "copy-clipboard" spans, so any other classed span made
            # this loop spin forever.
            i += 1
        spawn_commands.append(command_ids)
    return spawn_commands

def get_crafted_in(soup:BeautifulSoup)->list:
    """ Return the ids of the crafting stations this item can be made in. """
    label = soup.find("div", text=" Crafted in\n\t\t  ")
    if label is None:
        return []
    stations = []
    for a in label.find_next_sibling().select("a"):
        title = a.get("title")
        if title is None:
            continue
        # BUG FIX: the original `title in ("ARK: Survival Evolved Mobile")`
        # is a *substring* test (parentheses do not make a one-element tuple),
        # so any title contained in that string (e.g. "Mobile") was skipped too.
        if title == "ARK: Survival Evolved Mobile":
            continue
        item_id = idmapper.get_id(title)
        if item_id not in stations:  # de-duplicate while preserving order
            stations.append(item_id)
    return stations

def get_ingredientses(soup:BeautifulSoup)->list:
    """ Return every ingredient combination the item can be crafted from.

    Each entry is {"name": <combination label>,
                   "ingredients": [{"id": ..., "quantity": ...}, ...]}
    where "quantity" is kept as the raw string scraped from the page.
    """
    # BUG FIX: use a raw string for the regex — the original plain string
    # contained the invalid escape sequence "\(" (a SyntaxWarning on modern
    # Python); the matched pattern itself is unchanged.
    headings = soup.find_all(text=re.compile(r"^Ingredients ?\(?.*", flags=re.S))
    ingredientses = []
    # Each matched heading introduces one ingredient combination.
    for heading in headings:
        container = heading.parent
        combo = {"name": container.get_text(), "ingredients": []}
        # Each padded div in the sibling element is one ingredient row.
        for row in container.find_next_sibling().find_all("div", style="padding-left:5px"):
            combo["ingredients"].append({
                "id": idmapper.get_id(row.a.get("title")),                # ingredient id
                "quantity": row.b.get_text().split(" × ",maxsplit=1)[0],  # amount (string)
            })
        ingredientses.append(combo)
    return ingredientses

def get_titles(pagefrom="A"):
    """ Fetch the titles of every page in the wiki's "Items" category,
    paging alphabetically from *pagefrom* onwards.

    NOTE: each follow-up page starts with the previous page's last title,
    so the returned list may contain duplicates (callers de-duplicate).
    """
    titles = []
    while True:
        url=f"https://ark.fandom.com/wiki/Category:Items?pagefrom={pagefrom}"
        print("正在请求:",url)
        page = BeautifulSoup(session.get(url).content, "lxml")
        heading = page.find("h2", text='Pages in category "Items"')
        listing = heading.find_next_sibling("div", class_="mw-content-ltr")
        batch = [li.a.get("title")
                 for ul in listing.select("ul")
                 for li in ul.select("li")]
        titles.extend(batch)
        # A page with at most one entry means there is no next page.
        if len(batch) <= 1:
            return titles
        # Continue from the last title seen on this page.
        pagefrom = batch[-1]

def scrap_all_items(translate=True,update_data=False,update_icon=False)->None:
    """ Scrape every item in the wiki's Items category and save the result.

    translate    -- run the translator over newly scraped items afterwards
    update_data  -- re-scrape items already present on disk
    update_icon  -- re-download icon files that already exist
    """
    black_list = {"Pistol_Hat_Skins"}           # wiki ids we never scrape
    items = utils.read_data("data/items.json")  # data already on disk
    print("正在获取所有物品的title")
    titles = get_titles()
    need_translate = {}                         # items still needing translation
    scraped = set()                             # wiki ids fetched in this run
    for i, title in enumerate(titles):
        page_id = idmapper.get_id(title)
        if "Mod:" in page_id:
            print(f"【Mod物品】已跳过第{i}行的数据:{title}")
            continue
        # Skip items we already have unless an update was requested.
        if page_id in items and not update_data:
            print(f"【旧数据】已跳过第{i}行的数据:{page_id}")
            continue
        wiki_id = idmapper.get_wiki_id(title)
        if wiki_id in black_list:
            print(f"【黑名单】第{i}行的数据已跳过:https://ark.fandom.com/wiki/{wiki_id}")
            continue
        # NOTE(review): this branch logs "skipped" but does NOT `continue`;
        # already-seen pages are re-fetched anyway — confirm whether a skip
        # was intended before changing it.
        if wiki_id in scraped:
            print(f"【已爬取】第{i}行的数据已跳过:https://ark.fandom.com/wiki/{wiki_id}")
        else:
            scraped.add(wiki_id)
        # Fetch and parse the item's info page.
        url = f"https://ark.fandom.com/wiki/{wiki_id}"
        print(f"正在爬取第{i}行的数据:{url}")
        soup = BeautifulSoup(session.get(url).content, "lxml")
        frameworks = get_info_frameworks(soup)
        for index, framework in enumerate(frameworks):
            # Pages with several items get a per-item suffix on the id.
            item_id = f"{page_id}_{index}" if len(frameworks) > 1 else page_id
            # Reuse the existing record when present, otherwise start fresh.
            if item_id in items:
                item = items[item_id]
            else:
                item = {"id": item_id, "wikiId": wiki_id}
            # Parse every field out of this item's framework element.
            item["name"] = get_name(framework)                # display name
            item["nameEn"] = item["name"]                     # English name
            item["aliases"] = item.get("aliases","")          # keep any existing aliases
            item["description"] = get_description(framework)  # flavour text (translated later)
            item["imgs"] = get_imgs(framework, update_icon=update_icon)
            item["weight"],item["weightMobile"] = get_weight(framework)
            item["stackSize"],item["stackSizeMobile"] = get_stack_size(framework)
            item["addedIn"] = get_added_in(framework)
            item["type"] = get_type(framework)
            item["spawnCommands"] = get_spawn_commands(framework)
            item["craftedIn"] = get_crafted_in(framework)
            item["ingredientses"] = get_ingredientses(framework)

            items[item_id] = item
            need_translate[item_id] = item
    if translate:
        translator.translate_items(need_translate,update_translations=True)
    # Persist the merged data set.
    utils.save_data(items, "data/items.json")

def update_translations():
    """ Re-run translation over all stored items and save the result. """
    data_file = "data/items.json"
    items = utils.read_data(data_file)
    translator.translate_items(items, update_translations=False)
    utils.save_data(items, data_file)

def add_custom_data():
    """ Merge hand-maintained custom data into the scraped item data. """
    items = utils.read_data("data/items.json")          # scraped item data
    custom_data = utils.read_data("custom-data/items.json")  # manual overrides
    for item_id, data in custom_data.items():
        # Unknown items are copied over wholesale.
        if item_id not in items:
            items[item_id] = data
            continue
        # Known items only take the custom aliases (when provided).
        if "aliases" in data:
            items[item_id]["aliases"] = data["aliases"]
    utils.save_data(items, "data/items.json")
