from sys import path as path2
from threading import Lock, Thread, active_count
from time import gmtime, localtime, sleep, strftime

from bs4 import BeautifulSoup
from rich import print as rprint

path2.append('c:/users/tian/desktop')
from Service import service
from service import req, title2tag


class Caoliu:
    """Scraper for the t66y (caoliu) forum.

    Walks the fid=25 index pages, spawns one worker thread per thread
    listing, and inserts new records into the local SQLite database
    through the project-level `service` module.
    """

    def __init__(self):
        service.path = 'd:/data/caoliu.db'
        self.newNum = 0         # number of rows inserted during this run
        self._lock = Lock()     # guards newNum: += is not atomic across threads

    def prepare(self, startIndex, endIndex):
        """Crawl index pages [startIndex, endIndex) and dispatch workers.

        startIndex / endIndex: 1-based page numbers, end exclusive.
        At most 4 worker threads run at once (the main thread itself is
        counted by active_count(), hence the limit of 5).
        """
        for i in range(startIndex, endIndex):
            indexUrl = f'https://t66y.com/thread0806.php?fid=25&search=&page={i}'
            # Retry forever on parse/network errors; the site is flaky.
            while True:
                try:
                    node_h3 = req.req2(indexUrl).find(id='tbody')('h3')
                    break
                except Exception as e:
                    rprint('[red]索引页解析出错')
                    rprint('报错信息：', e)
                    sleep(3)
            for h3 in node_h3:
                # Wait for a free slot *before* starting the thread so the
                # concurrency cap is never exceeded.
                while active_count() >= 5:
                    sleep(1)    # sleeping throttles requests and saves resources
                Thread(target=self.get, args=(h3, i)).start()
            self._wait_for_workers()
            rprint('[red]给你 5 秒钟时间切换节点')   # switch proxy node after each page
            sleep(5)
        self._wait_for_workers()
        rprint(f'Caoliu 爬取完毕，本次新增 {self.newNum} 条数据')

    def _wait_for_workers(self):
        """Block until only the main thread remains, polling with a short
        sleep so the wait does not busy-spin a CPU core."""
        while active_count() > 1:
            sleep(0.2)

    def get(self, h3, i):
        """Worker: fetch one thread page, extract its rmdown link and
        magnet URI, and insert a row into the `guochan` table.

        h3: the <h3> node from the index page holding the thread anchor.
        i:  index-page number, stored for bookkeeping/progress output.
        """
        # Collapse every whitespace run and lowercase the title.
        title = ''.join(h3.a.string.strip().split()).lower()
        tag = title2tag.aboutTitle(service.path, title, i, 'Caoliu', 3)
        if not tag:         # title hit a filter word, or is already recorded
            return

        url = 'https://t66y.com/' + h3.a.attrs['href']
        if url.startswith('https://t66y.com/read.php'):     # redirect page
            rprint('遇到了重定向页面')
            url = 'https://t66y.com/' + req.req2(url).find('div', class_='cleanbg')('a')[-1].attrs['href']
        soup = req.req2(url)
        hrefs = [a.attrs['href'] for a in soup.find('div', id='conttpc')('a')]
        link = next((h for h in hrefs if 'rmdown.com' in h), None)
        if link is None:
            # Unexpected page layout: log the url locally and bail out
            # before any of the extraction below can raise.
            rprint('出现异常情况，已写入本地文件')
            with open('d:/errors_caoliu.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url}\n')
            return
        # 'rmdown.com' is guaranteed by the search above, so only the
        # presence of the hash parameter needs checking here.
        if 'hash' in link:
            # hash value carries a 3-char prefix to drop; strip trailing
            # &-parameters and lowercase the info-hash.
            magnet = 'magnet:?xt=urn:btih:' + link.split('hash=')[-1][3:]
            magnet = magnet.split('&')[0].lower()
        else:
            magnet = ''
        timestamp = soup.find('div', class_='tipad')('span')[1].attrs['data-timestamp']
        postDate = strftime('%Y-%m-%d', gmtime(int(timestamp)))
        recordDate = strftime('%Y-%m-%d %H:%M', localtime())
        status = 0
        service.exec('insert into guochan(tag, title, url, link, magnet, postdate, recorddate, status) values (?, ?, ?, ?, ?, ?, ?, ?)',
                     (tag, title, url, link, magnet, postDate, recordDate, status))
        with self._lock:    # serialize the counter update across workers
            self.newNum += 1
        rprint(f'{self.newNum} ({i})')








