from sys import path as path2
from time import sleep, localtime, strftime
import base64, json
from threading import Thread, active_count

from rich import print as rprint
# from lxml import html
import requests

path2.append('c:/users/tian/desktop')
from Service import ua, service
from cili.service import title2tag, req

def req82bt(link):
    """POST the resource code embedded in *link* to 82bt's download endpoint
    and return the response body as text.

    Retries forever (0.5 s backoff) on any network error — best effort.
    """
    code = link.split('=')[-1]
    while True:
        try:
            resp = requests.post(
                'https://www.82bt.com/downs-m.php',
                data={'code': code},
                timeout=3,
                headers={'User-Agent': ua.ua, 'referer': link},
            )
        except Exception as err:
            rprint('请求 82bt 报错：', err)
            sleep(0.5)
        else:
            return resp.text

class Numdomain:
    """Crawler for one site described by an entry of parseRule.json.

    ``prepare()`` walks the site's index pages, spawns one worker thread per
    result node, extracts title/magnet/size/duration/postDate and stores new
    records through the project ``service`` module.
    """

    # Throttle: maximum simultaneously alive threads (including the main one).
    MAX_THREADS = 21

    def __init__(self, rule, starti=-1, endi=-1):
        """
        rule:   one parse-rule dict (name, baseUrl, xpaths, page range, ...).
        starti: first index page; < 0 means "use the rule's startIndex".
        endi:   last index page (inclusive); < 0 means "use the rule's endIndex".
        """
        self.newNum = 0     # number of records added during this run
        self.rule = rule
        if self.rule['proxy'] == 'WinXray':
            rprint(f'【注意】：此网站（{self.rule["name"]}） 需要 WinXray 代理')
        # Fall back to the rule's own range for any index not supplied;
        # endi is inclusive, so the internal bound is +1 for range().
        self.starti = starti if starti >= 0 else self.rule['startIndex']
        self.endi = (endi if endi >= 0 else self.rule['endIndex']) + 1

    def prepare(self):
        """Crawl every index page in [starti, endi) and wait for all workers."""
        rprint(f'正在爬取：{self.rule["name"]}')
        for i in range(self.starti, self.endi):
            # Retry the index page until it parses.
            while True:
                try:
                    url = self.rule['baseUrl'] + self.rule['indexUrl']
                    middleNodes = req.req3(url.format(i)).xpath(self.rule['middleNodeXpath'])
                    break
                except Exception as e:
                    rprint('[red]索引页解析出错')
                    rprint('报错信息：', e)
                    sleep(3)
            for node in middleNodes:
                if self.rule['name'] == 'GCBT':
                    Thread(target=self.gcbt, args=(node, i)).start()
                else:
                    Thread(target=self.numdomain, args=(node, i)).start()
                # Throttle thread creation; sleep so we do not busy-spin a core.
                while active_count() >= self.MAX_THREADS:
                    sleep(0.05)
        # Wait until only the main thread remains.
        while active_count() > 1:
            sleep(0.2)
        rprint(f'{self.rule["name"]} 爬取完毕，本次新增 {self.newNum} 条数据')

    def numdomain(self, node, i):
        """Worker: parse one result node (plus its detail pages) and save it."""
        complexity = self.rule['complexity']
        _title = node.xpath(self.rule['titleXpath'])[0]
        if self.rule['encryption'] == 'base64':
            _title = base64.b64decode(_title.split("'")[1]).decode('utf-8')      # decrypt
        # Strip whitespace, question marks, and leading/trailing plus signs.
        title = ''.join(_title.strip('+').replace('?', '').split()).lower()
        site = self.rule['name']
        tag = title2tag.aboutTitle(service.path, title, i, site, complexity)
        if tag is None:     # this video is filtered out
            return
        url = self.rule['baseUrl'] + node.xpath(self.rule['pageurlXpath'])[0]
        if complexity == 1:         # everything lives on the index page
            try:
                _magnet = node.xpath(self.rule['magnetXpath'])[0]
            except Exception:       # was a bare except; keep best-effort but let SystemExit through
                rprint('什么鬼错误')
                return
            size = node.xpath(self.rule['sizeXpath'])[0].strip()
            postDate = node.xpath(self.rule['postdateXpath'])[0].strip()
            duration = 'unknown'
            link = 'no'
        else:                       # complexity 2/3: fetch the detail page
            _html = req.req3(url)
            try:
                postDate = _html.xpath(self.rule['postdateXpath'])[0].strip().split('：')[-1]
            except AttributeError:
                # postDate lives inside a comment node — take its .text instead
                postDate = _html.xpath(self.rule['postdateXpath'])[0].text.strip()
            _size = _html.xpath(self.rule['sizeXpath'])
            # Default so a page exposing no recognizable size cannot raise NameError.
            size = 'unknown'
            if len(_size) > 1:
                # Forum pages mix the size into a blob of text lines; pick the right one.
                for s in _size:
                    if '容量' in s or '大小' in s:
                        size = s.strip().split('：')[-1]
                        break
            elif _size:
                size = _size[0].strip().split('：')[-1]
            if self.rule.get('durationXpath') is None:      # forum resources often lack a duration
                duration = 'unknown'
            else:
                duration = _html.xpath(self.rule['durationXpath'])[0].strip().split('：')[-1]
            if complexity == 2:     # magnet is on the detail page
                link = 'no'
                _magnet = _html.xpath(self.rule['magnetXpath'])[0]
            elif complexity == 3:   # magnet is one page deeper, behind `link`
                link = _html.xpath(self.rule['linkXpath'])[0]
                if not link.startswith('http'):
                    link = self.rule['baseUrl'] + link
                _magnet = req.req3(link).xpath(self.rule['magnetXpath'])[0]
        self.savedb(tag, title, url, link, _magnet, size, duration, postDate, i)

    def savedb(self, tag, title, url, link, _magnet, size, duration, postDate, i):
        """Insert one record (western or guochan table), retrying failed inserts.

        Counts each record at most once, and a retry stays in the table that
        was chosen on the first attempt (previously a retried 'western' row
        fell through into guochan, and every retry bumped the counter again).
        """
        site = self.rule['name']
        magnet = _magnet.split("&")[0].lower()
        if tag == '必看':
            # Deduplicate '必看' records by magnet hash.
            result = service.query('select count(*) from guochan where magnet=?', (magnet,))[0][0]
            if result != 0:
                rprint(f'[green] {title} 已记录 ({i})——{site}')
                return

        recordDate = strftime('%Y-%m-%d %H:%M', localtime())
        status = 0
        if tag == 'western':
            tag = ' '
            table = 'western'
        else:
            table = 'guochan'
        while True:
            result = service.exec(f'insert into {table} (tag, title, url, link, magnet, size, duration, postdate, recorddate, site, status) values (?, ?, ?, ? ,? , ?, ?, ?, ?, ?, ?)',
                        (tag, title, url, link, magnet, size, duration, postDate, recordDate, site, status))
            rprint(f'{self.newNum} --{table}-- ({i})——{site}')
            if result != 0:     # insert succeeded
                break
        self.newNum += 1

    def gcbt(self, middleNode, i):
        """Worker for the GCBT site: three-level layout with odd link hosts."""
        # Grab postDate from the list page before descending: as of 2023-09-23
        # the detail pages no longer show a publish time.
        postDate = middleNode.xpath('.//time/@datetime')[0].split('T')[0]
        node_a = middleNode.xpath('.//h2/a')[0]
        _title = node_a.xpath('./@title')[0]
        title = ''.join(_title.strip('+').split()).lower()      # collapse all whitespace
        tag = title2tag.aboutTitle(service.path, title, i, 'GCBT', 3)
        if not tag:
            return
        url = node_a.xpath('./@href')[0]
        _html = req.req3(url)
        _magnet = _html.xpath('//a[starts-with(@href, "magnet:?xt")]/@href')
        if _magnet:
            _magnet = _magnet[0]
            link = 'no'
        else:
            # No direct magnet — hunt for an http(s) link in the article body.
            link = None     # sentinel instead of the old `'link' in locals()` hack
            for subnode in _html.xpath('//div[@class="entry-content u-text-format u-clearfix"]'):
                for s in subnode.xpath('.//text()'):
                    if 'http' in s:
                        link = str(s)       # cast from lxml _ElementUnicodeResult
            if link is None:    # rare: article contains no link at all
                print('未找到链接')
                with open('d:/exception_gcbt.txt', 'a', encoding='utf-8') as f:
                    f.write(f'{url}\n')        # log the url for manual inspection
                return
            if self.gcbtMagnet(link):
                # Known domains carry the info-hash either as ?hash=... or as the last path part.
                if 'hash=' in link:
                    _magnet = 'magnet:?xt=urn:btih:' + link.split('hash=')[-1]
                else:
                    _magnet = 'magnet:?xt=urn:btih:' + link.split('/')[-1]
            elif 'rmdown' in link:
                _magnet = 'magnet:?xt=urn:btih:' + link.split('hash=')[-1][3:]
            elif 'www.82bt.com' in link:
                if 'hash' in link:
                    _magnet = 'magnet:?xt=urn:btih:' + link.split('hash=')[-1][3:]
                else:
                    agnet = req82bt(link)
                    if agnet.startswith('<br />'):      # site failed to render the magnet
                        return                          # skip this video
                    _magnet = 'magnet:?xt=urn:btih:' + agnet
            else:
                rprint(f'[red]未知链接：{url} | {link}')
                _magnet = '未知'
        size = duration = 'unknown'
        self.savedb(tag, title, url, link, _magnet, size, duration, postDate, i)

    def gcbtMagnet(self, link):
        """Return True when *link* belongs to a known hash-in-url domain."""
        # NOTE: for bi2303--picdn.cc the correct url has two dashes in the middle.
        knownDomain = ('k00ppc', 'picd231', '91dfjh', 'picd232', 'picdn',
                       'b23dowx', 'b23dowy', 'b23dowz', 'b23dowa')
        return any(domain in link for domain in knownDomain)


# --- script entry: point service at the DB, load the parse rules, crawl all sites ---
service.path = 'd:/data/total.db'

with open('c:/users/tian/desktop/cili/parseRule.json', 'r', encoding='utf-8') as f:
    rules = json.load(f)        # stream-parse instead of json.loads(f.read())

# Resume mid-run:
# current_site = 'Sukebei'
# rules = list(rules.items())
# index = next((i for i, (name, _) in enumerate(rules) if name == current_site), None)
# rules = dict(rules[index:-1])

# Crawl a single site only:
# Numdomain(rules['gcbt'], starti=121, endi=150).prepare()
# Numdomain(rules['u9a9']).prepare()


for key, rule in rules.items():
    if key.endswith('bt'):
        sleep(10)       # '...bt' sites need no confirmation, just a pause between runs
    else:
        # Wait for the operator; discard the input (the old code bound it to
        # `next`, shadowing the builtin).
        input('回车下一个：')
    Numdomain(rule).prepare()