# 不能并入 numdomain 的原因：影片的信息和下载链接在一个节点里，无法通过通用的方法获取，需要有针对性的解析

from sys import path as path2
from time import sleep, localtime, strftime
import random
from threading import Thread, active_count

from bs4 import BeautifulSoup
from rich import print as rprint

path2.append('c:/users/tian/desktop')
from Service import service, ua, ips
from service import title2tag, req

class Tanhuazu:
    """Scraper for tanhuazu.com forum section 4.

    Walks the forum index pages, spawns one worker thread per post, and
    records (tag, title, url, link, magnet, dates, status) rows into the
    local ``guochan`` table via the project ``service`` module.

    Kept as a dedicated scraper (not merged into the generic crawler)
    because each post carries its metadata and download link inside a
    single node that needs bespoke parsing.
    """

    def __init__(self, ipsl):
        # ipsl: proxy/IP list handed straight to the request helper.
        from threading import Lock      # local import so the file-level import block stays untouched
        service.path = 'd:/data/tanhuazu.db'
        self.newNum = 0                 # rows inserted during this run
        self.R = req.Req(ipsl)
        # Guards newNum: get() runs in up to ~20 concurrent worker threads,
        # and an unsynchronized += across threads can lose increments.
        self._lock = Lock()

    def prepare(self, startIndex, endIndex):
        """Crawl index pages [startIndex, endIndex) and dispatch workers.

        Spawns one thread per post title node, throttled to keep fewer
        than 20 threads alive, then blocks until all workers finish.
        """
        for i in range(startIndex, endIndex):
            indexUrl = f'https://tanhuazu.com/forums/4/page-{i}?no_date_limit=1'
            # Retry forever on parse/network errors; find() returning None
            # raises TypeError when called, which is caught here too.
            while True:
                try:
                    node_div = self.R.req(indexUrl).find('div', class_='structItemContainer')('div', class_='structItem-title')
                    break
                except Exception as e:
                    rprint('[red]索引页解析出错')
                    rprint('报错信息：', e)
                    sleep(2)
            for div in node_div:
                Thread(target=self.get, args=(div, i)).start()
                # Crude throttle: block until the live thread count drops below 20.
                while active_count() >= 20:
                    sleep(1)
        # Wait for every worker to finish (only the main thread remains).
        while active_count() > 1:
            sleep(0.2)
        rprint(f'Tanhuazu 爬取完毕，本次新增 {self.newNum} 条数据')

    def get(self, div, i):
        """Worker: parse one post and insert a row if it is new.

        div -- the structItem-title node lifted from the index page
        i   -- index page number (used only for log messages)
        """
        # Collapse every whitespace run and lowercase → canonical title key.
        title = ''.join(div.get_text().strip().split()).lower()
        tag = title2tag.aboutTitle(service.path, title, i, 3)
        if not tag:
            return
        # Some title divs carry two <a> nodes; the last one is the post link.
        url = 'https://tanhuazu.com' + div('a')[-1].attrs['href']
        if service.query('select * from guochan where title=?', (title,)):  # already recorded
            rprint(f'[yellow]【{title}】已记录 ({i})')
            return
        soup = self.R.req(url)
        try:
            node_a = soup.find('div', class_='bbWrapper')('a', class_='link link--external')
        except Exception:   # narrowed from a bare except; find() may return None → TypeError
            print('出错，url：', url)
            return
        # A post may hold several external links; pick the first obdown one.
        # Tag.string is None for tags with nested markup, hence the `s and` guard.
        candidates = [a.string for a in node_a]
        for s in candidates:
            if s and s.startswith(('https://s1.obdown.com/do.php', 'https://www.obdown.com/link.php')):
                link = s
                break
        else:       # no download link recognised — log for manual inspection
            rprint(f'[red]出现未知情况，已写入本地文件 ({i}), url: {url}')
            with open('d:/data/errors_tanhuazu.txt', 'a', encoding='utf-8') as f:
                f.write(f'\n{url}')
            return
        if link.startswith('https://s1.obdown'):
            magnet = self.R.req(link).find('a', text='磁力下載').attrs['href']
        elif link.startswith('https://www.obdown'):
            magnet = self.R.req(link).ul.a.attrs['href']
        else:
            magnet = ''
        # Drop the query parameters after '&' and normalise to lower case.
        magnet = magnet.split('&')[0].lower()
        # int(), not eval(): data-time is untrusted page content and must
        # never be executed; it is expected to be a Unix-epoch digit string.
        timestamp = int(soup.find('div', class_='p-description').time.attrs['data-time'])
        postdate = strftime('%Y-%m-%d', localtime(timestamp))
        recorddate = strftime('%Y-%m-%d %H:%M', localtime())
        status = 0
        service.exec('insert into guochan(tag, title, url, link, magnet, postdate, recorddate, status) values (?, ?, ?, ?, ?, ?, ?, ?)',
                     (tag, title, url, link, magnet, postdate, recorddate, status))
        with self._lock:    # serialize the shared counter update across workers
            self.newNum += 1
            rprint(f'{self.newNum} ({i})')

