# 不能并入 numdomain 的原因：postDate 的获取特殊

from sys import path as path2
from time import sleep, localtime, strftime
from threading import Thread, active_count

from bs4 import BeautifulSoup
from rich import print as rprint

path2.append('c:/users/tian/desktop')
from Service import service
from service import title2tag, req



class Sehuatang:
    """Crawl sehuatang.net forum-2 index pages and record new posts into SQLite.

    Kept separate from the numdomain crawler because postDate extraction on
    this site needs special handling (see get()).
    """

    def __init__(self):
        # Point the shared service module at this crawler's database file.
        service.path = 'd:/data/sehuatang.db'
        self.newNum = 0     # number of records added during this run

    def prepare(self, startIndex, endIndex, step=1):
        """Walk index pages ``range(startIndex, endIndex, step)``.

        For each thread row found on an index page, spawn a worker thread
        running :meth:`get`.  Concurrency is capped at ~15 workers via
        ``active_count()``; between pages we pause 5 s so the operator can
        rotate the proxy node.
        """
        for i in range(startIndex, endIndex, step):
            indexUrl = f'https://www.sehuatang.net/forum-2-{i}.html'
            # Retry until the index page parses and yields thread rows.
            while True:
                try:
                    soup = req.reqSehuatang(indexUrl)
                    node_tbody = soup(lambda node: node.name == 'tbody' and node.attrs.get('id', '').startswith('normalthread'))
                    if node_tbody:
                        break
                    # Page came back but without thread rows — dump it and retry.
                    rprint('网页怎么这么烂，又出错了')
                    print(soup)
                except Exception as e:
                    rprint('[red]索引页解析出错')
                    rprint('报错信息：', e)
                    sleep(3)
            for tbody in node_tbody:
                Thread(target=self.get, args=(tbody, i)).start()
                # Throttle: keep fewer than 16 live threads (main thread included).
                while active_count() >= 16:
                    sleep(1)
            # Wait for this page's worker threads to finish.
            # (Fixed: the original spun here without sleeping, pegging a CPU core.)
            while active_count() > 1:
                sleep(0.2)
            rprint('[red]给你 5 秒钟时间切换节点')   # rotate proxy node between pages
            sleep(5)
        # Final barrier: wait for any remaining worker threads.
        while active_count() > 1:
            sleep(0.2)
        rprint(f'Sehuatang 爬取完毕，本次新增 {self.newNum} 条数据')

    def get(self, tbody, i):
        """Fetch one thread page and insert a record if it is new and unfiltered.

        :param tbody: BeautifulSoup ``<tbody>`` node of one index-page row.
        :param i: index-page number, passed through for tagging/logging.
        """
        _a = tbody.find('a', class_='s xst')
        title = ''.join(_a.string.strip().split()).lower()
        tag = title2tag.aboutTitle(service.path, title, i, 'Sehuatang', 3)
        if not tag:         # title hit a filter word or is already recorded
            return

        url = 'https://www.sehuatang.net/' + _a.attrs['href']
        _soup = req.reqSehuatang(url)
        _div = _soup.find('div', class_='pti')
        # postDate lives either in span[title] or in the em text after '发表于 '.
        if _div.span.has_attr('title'):
            postDate = _div.span.attrs['title']
        else:
            postDate = _div.em.string.split('发表于 ')[-1]
        _info = list(_soup.find('div', class_='t_fsz').td.stripped_strings)
        size = magnet = None
        for _s in _info:
            # BUG FIX: the original condition was `'影片大小' or '影片容量' in _s`,
            # which is always truthy (non-empty literal), so size was clobbered
            # by every string and ended up derived from the last one.
            if '影片大小' in _s or '影片容量' in _s:
                size = _s.split('：')[-1]
            if 'xt=urn:btih:' in _s:    # one post missed the leading 'm' of 'magnet', so match on the URN
                magnet = _info[-2].lower()
        if size is None or magnet is None:    # rare: page lacks size or magnet
            rprint('异常，未找到 size 或 magnet，url:', url)
            with open('d:/exception_sehuatang.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url}\n')        # log URL for manual inspection
            return
        recordDate = strftime('%Y-%m-%d %H:%M', localtime())
        status = 0
        service.exec('insert into guochan (tag, title, url, magnet, size, postdate, recorddate, status) values (?, ?, ?, ?, ?, ?, ?, ?)',
                     (tag, title, url, magnet, size, postDate, recordDate, status))
        # NOTE(review): += from worker threads relies on the GIL; fine for a
        # progress counter, not for anything that must be exact under race.
        self.newNum += 1
        rprint(f'{self.newNum} ({i})')

