import time

import requests
from fontTools.misc import etree

from internal import init


# from internal.search import getBeforeName


def testParse():
    """Ad-hoc manual test: parse a saved search-results page from disk.

    Reads ../file/res2.html, extracts the unique Steam profile URLs and
    the pagination caption, and prints the intermediate values for
    visual inspection.
    """
    # NOTE(review): relies on a fixture file relative to the CWD — confirm path.
    with open("../file/res2.html", "r", encoding="utf-8") as fp:
        strHtml = fp.read()
    e = etree.HTML(strHtml)

    steamUserUrls = []
    for href in e.xpath("//div//a//@href"):
        # Strip quote/backslash artifacts left over from the raw response.
        href = href.replace("'", "").replace("\\", "").replace('"', "")
        if "https://steamcommunity.com/profiles" in href and href not in steamUserUrls:
            steamUserUrls.append(href)
    print(steamUserUrls)
    pageText = e.xpath(
        '//div[@id="community_searchresults_pagination"]//span[@class="community_searchresults_paging"]//text()')
    print(pageText)
    # Presumably the caption looks like "\t1 - 20 of 1,234\t"; the "t"
    # replacement strips escaped-tab remnants — TODO confirm index 5.
    print(pageText[0].replace("t", "").split(" "))
    print(pageText[0].replace("t", "").split(" ")[5])


# Extract search-result profile URLs and the total result count.
def parseUrlsPage(strData):
    """Parse a community search-results page.

    :param strData: raw HTML of a Steam community search results page.
    :returns: ``(urls, pageText)`` when no profile URLs (or no usable
              pagination caption) were found, otherwise ``(urls, total)``
              where ``total`` is the result count parsed from the
              pagination caption (as a string, commas removed).
    """
    e = etree.HTML(strData)
    steamUserUrls = []
    for href in e.xpath("//div//a//@href"):
        # Strip quote/backslash artifacts left over from the raw response.
        href = href.replace("'", "").replace("\\", "").replace('"', "")
        if "https://steamcommunity.com/profiles" in href and href not in steamUserUrls:
            steamUserUrls.append(href)
    pageText = e.xpath(
        '//div[@id="community_searchresults_pagination"]//span[@class="community_searchresults_paging"]//text()')
    if not steamUserUrls:
        return steamUserUrls, pageText

    # Presumably the caption reads like "Showing 1 - 20 of 1,234";
    # keep the first text node that contains "of".
    pageInfo = next((t for t in pageText if "of" in t), "")
    if not pageInfo:
        # No pagination caption: fall back to the raw node list instead of
        # crashing — the original code raised IndexError on [5] here.
        return steamUserUrls, pageText
    # The "t" replacement strips escaped-tab remnants — TODO confirm index 5.
    return steamUserUrls, pageInfo.replace("\\", "").replace("t", "").split(" ")[5].replace(",", "")


# Fetch the account's previous aliases (name history).
def getBeforeName(url):
    """POST to ``<profile>/ajaxaliases/`` and return the response body.

    :param url: a Steam profile URL (without trailing slash).
    :returns: the response text (alias history payload).
    """
    # Build request headers from the saved browser-header dump; the
    # original left the file handle unclosed on parse errors.
    with open("../file/defaultHeader", "r", encoding="utf-8") as fp:
        headers = parseHeader(fp.read())
    res = requests.post(url + "/ajaxaliases/", headers=headers)
    res.encoding = "utf-8"
    # Throttle so consecutive requests respect the configured delay.
    time.sleep(init.defaultReqDelay)
    return res.text


# Parse a Steam profile page into a dict of account details.
def parseSteamInfo(strData, steamUrl):
    """Collect the visible details of a Steam profile page.

    :param strData: raw HTML of the profile page.
    :param steamUrl: the profile's URL (also forwarded to getBeforeName).
    :returns: dict with username, alias history, in-game status, ban
              info, friends-page link, recent games and comments.
    """
    doc = etree.HTML(strData)

    def _clean(texts):
        # Drop the tab/CR/LF noise surrounding text nodes in the markup.
        return [t.replace("\t", "").replace("\r", "").replace("\n", "") for t in texts]

    username = doc.xpath('//div[@class="persona_name"]//span[@class="actual_persona_name"]//text()')
    beforeUserName = getBeforeName(steamUrl)
    gamingStatus = doc.xpath(
        '//div[@class="responsive_status_info"]//div[@class="profile_in_game persona in-game"]//div[@class="profile_in_game_name"]//text()')
    vacInfo = _clean(doc.xpath('//div[@class="responsive_status_info"]//div[@class="profile_ban_status"]//text()'))
    friendUrls = doc.xpath(
        '//div[@class="profile_friend_links profile_count_link_preview_ctn responsive_groupfriends_element"]//div//a//@href')
    # Keep only the link that leads to the friends page, if one exists
    # (the last matching link, mirroring the original loop's result).
    friendsLinks = [u for u in friendUrls if "friends" in u]
    if friendsLinks:
        friendUrls = [friendsLinks[-1]]
    recentGamesName = doc.xpath(
        '//div[@class="recent_games"]//div[@class="recent_game"]//div//div[@class="game_name"]//a//text()')
    recentGamesDetail = _clean(doc.xpath(
        '//div[@class="recent_games"]//div[@class="recent_game"]//div//div[@class="game_info_details"]//text()'))
    comments = _clean(doc.xpath(
        '//div[@class="commentthread_comment_content"]//div[@class="commentthread_comment_text"]//text()'))
    return {
        "steamUrl": steamUrl,
        'username': username,
        'beforeUserName': beforeUserName,
        'gamingStatus': gamingStatus,
        'vacInfo': vacInfo,
        'friends': friendUrls,
        'recentGamesName': recentGamesName,
        'recentGamesDetail': recentGamesDetail,
        'comments': comments
    }


# Extract friend display names and profile URLs from a friends page.
def parseFriendship(strData):
    """Return ``(names, urls)`` parsed from a profile's friends listing.

    :param strData: raw HTML of a Steam friends page.
    :returns: tuple of (list of friend names, list of friend URLs).
    """
    doc = etree.HTML(strData)
    rawNames = doc.xpath('//div[@class="profile_friends search_results"]//div//@data-search')
    # The data-search attribute embeds a " ;  ; " separator artifact; drop it.
    friendNames = [name.replace(" ;  ; ", "") for name in rawNames]
    friendUrls = doc.xpath('//div[@class="profile_friends search_results"]//div//a//@href')
    return friendNames, friendUrls


# Parse a raw Cookie header value into a name -> value dict.
def parseCookie(cookieData):
    """Parse a browser-copied cookie string (``k1=v1; k2=v2``) into a dict.

    Splits each segment on the *first* '=' only, so values that contain
    '=' are no longer truncated (the original ``split("=")[1]`` dropped
    everything after the second '='), strips surrounding whitespace, and
    skips malformed/empty segments such as a trailing ';' (which raised
    IndexError before).

    :param cookieData: raw Cookie header string.
    :returns: dict mapping cookie names to values.
    """
    cookies = {}
    for segment in cookieData.split(";"):
        name, sep, value = segment.partition("=")
        if not sep:
            continue  # no '=' — malformed or empty segment
        cookies[name.strip()] = value.strip()
    return cookies


# Parse a browser-copied raw request-header dump into a dict.
def parseHeader(strData):
    """Parse ``Name: value`` header lines (one per line) into a dict.

    Splits on the *first* ':' only, so headers whose value itself
    contains colons (e.g. a Referer URL ``https://...``) are no longer
    silently dropped — the original ``len(split(":")) == 2`` check
    skipped them entirely.

    Spaces are removed from names and values (preserving the original
    normalization); the session id from ``init`` is appended to the
    Cookie header so authenticated requests keep working.

    :param strData: raw header text copied from the browser dev tools.
    :returns: dict of header name -> value.
    """
    header = {}
    for line in strData.split("\n"):
        key, sep, value = line.partition(":")
        if not sep:
            continue  # line has no ':' — not a header
        key = key.replace(" ", "")
        value = value.replace(" ", "")
        if key == "Cookie":
            # Append the live session id so the server sees a valid session.
            header[key] = value + str(init.g_sessionID)
        else:
            header[key] = value
    return header

# testParse()
