import asyncio
import datetime
import logging
import os
import sys
import time

import requests
from bs4 import BeautifulSoup

from config.dir_config import LOG_DIR

# Module-level logger: DEBUG-and-above records go both to a timestamped
# log file under LOG_DIR and to the console, with one shared format.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# strftime() already returns a str, so the old str(...) wrapper was redundant.
handler = logging.FileHandler(
    filename=os.path.join(
        LOG_DIR, datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".log"),
    mode="w",
    encoding="utf-8")
handler.setLevel(logging.DEBUG)

# Second handler mirrors everything to stderr for interactive runs.
handler2 = logging.StreamHandler()
handler2.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler2.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(handler2)

from dao.BaseRepository import BaseRepository

# PAGE_NUM = 1

baseRep = BaseRepository(collectionname="aligames")


async def crapy_AliGames(PAGE_NUM):
    """Scrape one listing page of down.ali213.net and schedule a detail
    scrape for every game not yet present in the repository.

    PAGE_NUM: 1-based index of the listing page to fetch.
    """
    start_time = time.time()
    list_url = "https://down.ali213.net/new/index_" + str(PAGE_NUM) + ".html"
    # NOTE(review): requests.get is blocking; inside a coroutine it stalls the
    # event loop. Kept because the concurrent work (detail tasks) starts after.
    r = requests.get(list_url)
    soup = BeautifulSoup(r.content, 'html5lib')

    # Removed the dead `chongfu` duplicate counter: its increment was
    # commented out, so the `>= 5` / sys.exit() branch could never trigger.
    task_async = []
    items = soup.select("#rqjxhb > div.famous-ul-container > div>.famous-li")
    for item in items:
        link = item.select_one(".content-a")
        if link is None:
            continue
        name_node = item.select_one(".game-name")
        if name_node is None:
            continue
        game_name = name_node.string
        _, num, msg = baseRep.search_no_page({"name": game_name})
        if num == 0:
            logger.info("{}不存在,开始爬取!".format(game_name))
            task_async.append(asyncio.create_task(getDetail(link.get("href"), game_name)))
        else:
            logger.error("{}已存在,停止爬取!".format(game_name))

    # Wait for every detail task scheduled above (they already run
    # concurrently via create_task; gather just collects them).
    await asyncio.gather(*task_async)
    logger.info("第{0}页,耗时{1}秒".format(PAGE_NUM, str(time.time() - start_time)))


def _select_text(soup, selector, default):
    """Return the .string of the first element matching *selector*, or *default*."""
    node = soup.select_one(selector)
    return node.string if node is not None else default


def _select_attr(soup, selector, attr, default):
    """Return attribute *attr* of the first element matching *selector*, or *default*."""
    node = soup.select_one(selector)
    return node.get(attr) if node is not None else default


async def getDetail(url, game_name):
    """Fetch a game's detail page, resolve its download/baiduyun/torrent
    links, and insert the assembled record via baseRep if it is new.

    url: path component of the detail page (joined onto down.ali213.net).
    game_name: display name used as the uniqueness key in the repository.
    """
    url = "https://down.ali213.net" + url
    logger.info("爬取链接:" + str(url))
    r = requests.get(url)
    # Bug fix: the original set "gbk2312", which is not a registered codec,
    # so requests silently fell back to charset sniffing. "gb2312" is the
    # intended encoding for this site.
    r.encoding = "gb2312"
    soup = BeautifulSoup(r.text, 'html5lib')
    baiduyun_code = ""

    info_prefix = ('#top > div.detail_game_l > div.detail_game_l_r > '
                   'div.detail_game_l_r_info.mt5 > ul > ')
    game_type = _select_text(soup, info_prefix + 'li:nth-child(1) > a', "未知类型")
    game_publish_date = _select_text(soup, info_prefix + 'li:nth-child(2) > a > span', "未知日期")
    game_producer = _select_text(soup, info_prefix + 'li:nth-child(3)', "未知厂商")
    # NOTE(review): default text for the language field reads "未知厂商" in the
    # original; preserved byte-for-byte even though it looks like a copy-paste.
    game_lang = _select_text(
        soup,
        '#top > div.detail_game_l > div.detail_game_l_r > div.detail_game_l_r_tit > '
        'div.detail_game_l_r_ctit > span:nth-child(2)',
        "未知厂商")
    game_introduce = _select_text(soup, '#yxjs > div.detail_body_left_info_con', "未知内容")
    game_play_introduce = _select_text(
        soup, 'body > div.content.clear.game > div.Content_L > div.GmL_1 > p:nth-child(4)',
        "未知玩法")
    game_content_introduce = _select_text(
        soup, 'body > div.content.clear.game > div.Content_L > div.GmL_1 > p:nth-child(6)',
        "未知游戏内容")
    game_img_url = _select_attr(
        soup, '#bimg > div:nth-child(2) > div.detail_body_con_jt_con_img > img',
        "src", "未知图片URL")

    # The download URL lives inline in the 21st <script> tag as
    # `var downUrl ="...";` — strip the JS wrapper to get the raw URL.
    downJSURL = soup.select("script")
    downURL = None
    if len(downJSURL) >= 21:
        downURL = str(downJSURL[20].string).replace('var downUrl ="', '').replace('";', "").strip()
    print(game_name, url, downURL)

    # Bug fix: the original called downURL.upper() BEFORE its None check,
    # raising AttributeError whenever the script tag was missing.
    if downURL is None:
        return
    if downURL.upper() == "NONE":
        # scripts[20].string was None — log the offending tag for debugging.
        print("getbad:", downJSURL[20])

    if not downURL.lower().startswith("/down"):
        return
    downURL = "https://www.soft8899.com" + downURL
    soup = BeautifulSoup(requests.get(downURL).text, 'html5lib')
    gowp = soup.select_one("#gowpbtn")
    downURL = gowp.get("href") if gowp is not None else None
    if downURL is None:
        return

    # Follow the intermediate page to the actual mirror links.
    soup = BeautifulSoup(requests.get(downURL).text, 'html5lib')
    baiduyun_url = _select_attr(soup, "#wpbtn", "href", None)
    torrent_url = _select_attr(soup, "#btbtn", "href", None)
    game_size = _select_text(
        soup, "body > div.yxhz_n1_container > div.n1_content > ul > li.gameSize > span", None)

    game = {
        "name": game_name,
        "size": game_size,
        "lang": game_lang,
        "type": game_type,
        "producer": game_producer,
        "publish_date": game_publish_date,
        "introduce": game_introduce,
        "play_introduce": game_play_introduce,
        "content_introduce": game_content_introduce,
        "img_url": game_img_url,
        "baiduyun_url": baiduyun_url,
        "baiduyun_code": baiduyun_code,
        "torrent_url": torrent_url
    }

    logger.info(str(game))
    # Re-check existence right before inserting, in case a concurrent task
    # already stored the same name.
    _, num, msg = baseRep.search_no_page({"name": game_name})
    if num == 0:
        baseRep.insert_one(game)
        print("已保存:" + str(game))


# browser = webdriver.Chrome()  # 声明浏览器
# browser.get(url)

if __name__ == "__main__":
    # Crawl listing pages 1..465 sequentially, one event loop run per page.
    # (The old inner `if x > 0` guard was always true for range(1, 466).)
    for x in range(1, 466):
        logger.info("爬取第" + str(x) + "页")
        asyncio.run(crapy_AliGames(x))
