import os
from bs4 import BeautifulSoup
import urllib.request
import urllib.parse
from tqdm import trange

# Working directory for downloaded torrents, derived from the
# myCodeNexusPath environment variable (backslashes normalized to '/').
thisPath=os.environ['myCodeNexusPath'].replace('\\','/')+'/py/nhentai'
# Session cookie for nhentai.net, read from the environment so the
# credential is not hard-coded in the source.
Cookie=os.environ['nhentaiCookie']

# HTTP headers sent with every request: a desktop-browser User-Agent
# plus the authenticated session cookie.
headers = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',

    'Cookie':Cookie
}


def getTorrent(url):
    """Download the .torrent file for one gallery URL.

    The gallery id is taken from the URL (".../g/<id>/" -> "<id>.torrent")
    and the file is saved under thisPath + "/torrent/".
    """
    # Second-to-last path segment is the gallery id used as the file name.
    filename = url.split('/')[-2] + '.torrent'
    request = urllib.request.Request(url + '/download', headers=headers)
    # Fixed typo: was "reponse".
    response = urllib.request.urlopen(request).read()

    # "wb" suffices for a pure write; the with-statement closes the file,
    # so the original explicit f.close() was redundant.
    with open(thisPath + "/torrent/" + filename, "wb") as f:
        f.write(response)


def getFavoriteUrls():
    """Scrape all favorites pages and collect gallery URLs.

    Returns a list of absolute gallery URLs ("https://nhentai.net/g/<id>/").
    Stops when a page reports "No favorites match your search query",
    i.e. the page number is past the last favorites page.
    """
    # Build a real list instead of the original newline-joined string,
    # which was quadratic and needed a split('\n')[1:] to undo.
    urls = []
    for page in range(1, 1025):  # hard safety cap on pages scanned
        print("---------------")
        print("开始第" + str(page) + "页")
        request = urllib.request.Request(
            "https://nhentai.net/favorites/?page=" + str(page),
            headers=headers)
        html = urllib.request.urlopen(request).read()

        soup = BeautifulSoup(html, 'html.parser')

        # Guard clause: this message appears once the page is past the end.
        if "No favorites match your search query" in soup.get_text():
            break

        # Collect every gallery link ("/g/<id>/") on this favorites page.
        for link in soup.find_all('a'):
            # link.get('href') can be None for anchors without href;
            # the original "+ ''" would raise TypeError in that case.
            url = link.get('href') or ""
            if "/g/" in url:
                if "https://nhentai.net" not in url:
                    url = "https://nhentai.net" + url
                print(url)
                urls.append(url)

    return urls


def delOldUrl(urls):
    """Remove URLs whose torrent file is already downloaded.

    Maps each existing "<id>.torrent" file back to its gallery URL and
    returns the URLs from *urls* that are not yet on disk.
    """
    # List files directly inside the torrent directory (non-recursive).
    # The original os.walk loop overwrote the list on every iteration,
    # keeping only the LAST directory's files if sub-directories existed.
    downloaded = next(os.walk(thisPath + "/torrent/"), ("", [], []))[2]

    # Map file name "12345.torrent" back to "https://nhentai.net/g/12345/".
    done = {"https://nhentai.net/g/" + name.split('.')[0] + "/"
            for name in downloaded}

    # Set difference keeps only the not-yet-downloaded links.
    newUrl = list(set(urls) - done)

    print("---------------")
    print("只剩下未下载的链接")
    for url in newUrl:
        print(url)

    return newUrl


# Gather every favorite gallery URL, drop the ones already on disk,
# then download the remaining torrents with a progress bar.
urls = getFavoriteUrls()
urls = delOldUrl(urls)

for idx in trange(len(urls)):
    getTorrent(urls[idx])


