from sys import path as path2
from threading import Lock, Thread, active_count
from time import localtime, sleep, strftime

import requests
from bs4 import BeautifulSoup
from rich import print as rprint

path2.append('c:/users/tian/desktop')
from Service import service, ua
from service import title2tag, req


# 获取磁力时不取巧时的函数
# def req(url, dire=1):
#     if dire > 0:
#         while True:
#             try:
#                 if dire == 1:
#                     r = requests.get(url, headers={'User-Agent': ua.ua}, timeout=3)
#                 elif dire == 2:
#                     posturl = 'http://www.rmdown.com/download.php?action=magnet&ref=' + url.split('=')[-1]
#                     r = requests.get(posturl, headers={'User-Agent': ua.ua, 'referer': url, 'cookie': 'PHPSESSID=7dg1dl9qeidd3r93o32ku1c1q2; ses=e36239979143842f730a18734303fc78'}, timeout=3)
#                 break
#             except:
#                 rprint('[red]连接超时，重连中。。。')
#         if dire == 1:
#             soup = BeautifulSoup(r.text, 'lxml')
#             return soup
#         elif dire == 2:
#             return r.text
#     else:
#         posturl = 'https://www.82bt.com/downt-m.php' if 'hash' in url else 'https://www.82bt.com/downs-m.php'
#         while True:
#             try:
#                 r = requests.post(posturl, data={'code': url.split('=')[-1]}, timeout=3,
#                                   headers={'User-Agent': ua.ua, 'referer': url})
#                 break
#             except:
#                 rprint('[red]连接超时，重连中。。。')
#         return r.text


def req82bt(link):
    """Resolve an 82bt download link to its response body.

    POSTs the numeric code extracted from *link* to the 82bt download
    endpoint and returns the raw response text (the magnet hash, or an
    HTML error fragment starting with '<br />' when the site fails).

    Retries forever on network-level failures only; a programming error
    (e.g. a bad attribute access) propagates instead of looping forever.
    """
    code = link.split('=')[-1]          # hoisted: invariant across retries
    while True:
        try:
            r = requests.post('https://www.82bt.com/downs-m.php',
                              data={'code': code},
                              headers={'User-Agent': ua.ua, 'referer': link},
                              timeout=3)
            break
        except requests.RequestException as e:   # narrowed from bare Exception
            rprint('请求 82bt 报错：', e)
            sleep(0.5)
    return r.text

class Gcbt:
    """Scraper for gcbt2.xyz index pages.

    Walks paginated index pages, spawns up to 10 worker threads (one per
    article), extracts a download link from each article, converts it to
    a magnet URI and inserts a row into the `guochan` SQLite table.
    """

    def __init__(self, ipsl):
        service.path = 'd:/data/gcbt.db'
        self.newNum = 0             # count of newly inserted records
        self.R = req.Req(ipsl)      # proxy-aware requester (project helper)
        self._lock = Lock()         # guards newNum: += from worker threads is not atomic

    def prepare(self, startIndex, endIndex):
        """Crawl index pages [startIndex, endIndex) and wait for all workers."""
        for i in range(startIndex, endIndex):
            indexUrl = f'https://gcbt2.xyz/page/{i}'
            while True:
                try:
                    node_h2 = self.R.req(indexUrl).find_all('h2', class_='entry-title')
                    break
                except Exception as e:
                    rprint('[red]索引页解析出错')
                    rprint('报错信息：', e)
                    sleep(2)
            for h2 in node_h2:
                Thread(target=self.get, args=(h2, i)).start()
                sleep(0.08)
                # Throttle: keep at most 10 worker threads alive (main thread
                # counts as one). Sleep instead of the old pure busy-wait.
                while active_count() >= 11:
                    sleep(0.1)
        # Wait until only the main thread remains.
        while active_count() != 1:
            sleep(0.2)
        rprint(f'Gcbt 爬取完毕，本次新增 {self.newNum} 条数据')

    def get(self, h2, i):
        """Worker: scrape one article node *h2* from index page *i*.

        Skips already-recorded titles, finds the first http link in the
        article body, derives a magnet URI from it and stores the record.
        """
        title = h2.a.attrs['title']
        title = ''.join(title.split()).lower()      # collapse all whitespace runs
        tag = title2tag.aboutTitle(service.path, title, i, 3)
        if not tag:     # already recorded (or filtered) — nothing to do
            return
        url = h2.a.attrs['href']
        soup = req.req2(url)
        postDate = soup.time.attrs['datetime'].split('T')[0]
        # Some pages keep the download link outside <p> nodes, so scan
        # every text fragment under the content <div>.
        div = soup.find('div', class_='entry-content u-text-format u-clearfix')
        link = None     # explicit sentinel (was a fragile `'link' in locals()` check)
        for subnode in div:
            for s in subnode.stripped_strings:
                if 'http' in s:
                    link = s    # keep the last http-looking fragment
        if link is None:        # rare: article body contains no link at all
            print('未找到链接')
            with open('d:/exception_gcbt.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url}\n')     # log URL for manual inspection
            return
        if 'ww1.k00ppc' in link:
            magnet = 'magnet:?xt=urn:btih:' + link.split('/')[-1]
        elif 'ww1.picd231' in link or 'ww1.91dfjh' in link or 'picd232' in link:
            # TLD varies: top/cc/pw/net/club/xyz/work — match on the host stem only
            magnet = 'magnet:?xt=urn:btih:' + link.split('/')[-1]
        elif 'rmdown' in link:
            # The rmdown hash carries a 3-char prefix before the real btih hash.
            magnet = 'magnet:?xt=urn:btih:' + link.split('hash=')[-1][3:]
        elif 'www.82bt.com' in link:
            if 'hash' in link:
                magnet = 'magnet:?xt=urn:btih:' + link.split('hash=')[-1][3:]
            else:
                body = req82bt(link)
                if body.startswith('<br />'):   # site failed to render the magnet
                    return                      # skip this video entirely
                magnet = 'magnet:?xt=urn:btih:' + body
        else:
            rprint(f'[red]未知链接：{link}')
            magnet = '未知'
        magnet = magnet.split("&")[0].lower()   # drop tracker params, lowercase hash
        recorddate = strftime('%Y-%m-%d %H:%M', localtime())
        status = 0
        service.exec('insert into guochan(tag, title, url, link, magnet, postdate, recorddate, status) values (?, ? ,? , ?, ?, ?, ?, ?)',
                    (tag, title, url, link, magnet, postDate, recorddate, status))
        with self._lock:        # serialize the shared counter update
            self.newNum += 1
        rprint(f'{self.newNum} ({i})')




'''
Observed download-link formats:
1. http://www.rmdown.com/link.php?hash=2310b211ea784f5833626665f6b7b9eac816f8fa0ac
2. https://ww1.k00ppc.xyz/C62CD0CBA9DA7B67F2BC0C715F41C975EB7AC98F
3. http://www.82bt.com/rlink.php?ref=3897461
4. http://www.82bt.com/cao.php?hash=223631e76992fe2fe5132a72a43a287b26289b66a2f
'''
