from sys import path as path2
from time import sleep, localtime, strftime
import json

import requests
from bs4 import BeautifulSoup
from rich import print as rprint
from py3langid.langid import LanguageIdentifier, MODEL_FILE

# Make the project-local helper modules importable (sys.path hack).
path2.append('c:/users/tian/desktop')
# NOTE(review): the casing differs between 'Service' and 'service' below —
# on Windows' case-insensitive filesystem these may resolve to the same
# module; confirm which module names are actually intended.
from Service import service
from service import req


# Keyword categories (whitelist / blacklist / secondary / anchorman /
# toupai / others) consumed by aboutTitle() to classify scraped titles.
with open('d:/data/list.json', 'r', encoding='utf-8') as f:
    keywords = json.loads(f.read())

# Language detector used to skip Japanese titles; restricted to the three
# languages that actually occur so classification is more reliable.
identifier = LanguageIdentifier.from_pickled_model(MODEL_FILE, norm_probs=True)
identifier.set_languages(['zh', 'en', 'ja'])

def aboutTitle(title, magnet, i):
    """Classify a scraped title and return its tag, or None to skip it.

    Priority (per the original design notes): whitelist entries rank highest
    because some anchorman resources are also high quality; '必看' items are
    allowed to be recorded repeatedly so a higher-quality / watermark-free
    duplicate can be chosen later — duplicate magnets are filtered out right
    before downloading.

    Args:
        title: normalized (lowercased, whitespace-stripped) resource title.
        magnet: magnet link, used to detect already-recorded entries.
        i: index-page number, used only in log output.

    Returns:
        A tag string ('必看', '主播', '偷拍', a matched `others` keyword,
        '探索', or ' ' as the catch-all) when the entry should be recorded,
        or None when it should be skipped.
    """
    whitelist = keywords['whitelist']
    blacklist = keywords['blacklist']
    secondary = keywords['secondary']
    anchorman = keywords['anchorman']
    toupai = keywords['toupai']
    others = keywords['others']

    # Whitelist wins before every other filter — even the duplicate check.
    if any(k in title for k in whitelist):
        return '必看'

    lang = identifier.classify(title)[0]
    if lang == 'ja':
        print('日语，略过')
        return None
    elif lang == 'zh':
        # The blacklist only applies to Chinese titles.
        hit = next((k for k in blacklist if k in title), None)
        if hit is not None:
            print(f'含过滤词 {hit}，略过 ({i})')
            return None

    # Skip entries already recorded with the same magnet; a different magnet
    # for the same title falls through and gets recorded again.
    result = service.query('select magnet from test where title=?', (title,))
    if result and result[0][0] == magnet:
        rprint(f'[yellow]【{title}】已记录 ({i})')
        return None

    if any(k in title for k in anchorman):
        return '主播'
    if any(k in title for k in toupai):
        return '偷拍'
    # For 'others' the matched keyword itself is the tag.
    matched = next((k for k in others if k in title), None)
    if matched is not None:
        return matched
    if any(k in title for k in secondary):
        return '探索'
    # Default: a single space — truthy, so the caller still inserts the row.
    return ' '

class U9a9:
    """Scraper for u9a9.xyz listings, persisting new entries via `service`."""

    def __init__(self):
        # Point the shared DB helper at this site's database file.
        service.path = 'd:/data/u9a9.db'

    def prepare(self, startIndex, endIndex):
        """Crawl index pages [startIndex, endIndex) and record tagged items."""
        added = 0
        for page in range(startIndex, endIndex):
            page_url = f'https://u9a9.xyz/?type=2&p={page}'
            # Retry the index page until it parses; transient failures are
            # reported and retried after a short pause.
            while True:
                try:
                    rows = req.req2(page_url).tbody('tr')
                except Exception as err:
                    rprint('[red]索引页解析出错')
                    rprint('报错信息：', err)
                    sleep(3)
                else:
                    break
            for row in rows:
                cells = row('td')
                # Normalize the title: drop leading '+', strip '?' and all
                # whitespace, lowercase.
                raw_title = row.a.attrs['title']
                title = ''.join(raw_title.lstrip('+').replace('?', '').split()).lower()
                magnet = cells[2]('a')[-1].attrs['href'].split("&")[0].lower()
                tag = aboutTitle(title, magnet, page)
                if not tag:
                    # None/'' means aboutTitle decided to skip this entry.
                    continue
                record = (
                    tag,
                    title,
                    'https://u9a9.xyz' + row.a.attrs['href'],
                    magnet,
                    cells[3].string,
                    cells[4].string,
                    strftime('%Y-%m-%d %H:%M', localtime()),
                    0,
                )
                service.exec('insert into test (tag, title, url, magnet, size, postdate, recorddate, status) values (?, ?, ?, ?, ?, ?, ?, ?)',
                            record)
                added += 1
                rprint(f'{added} ({page})')
        rprint(f'U9a9 爬取完毕，本次新增 {added} 条数据')



