# import libraries
import urllib.request
import logging
from typing import List

from bs4 import BeautifulSoup

from orm.entity import Cookie, Mate, is_uncensored_origin
from orm.session import get_session

# Logger setup: console-only logger used by the whole script
log = logging.getLogger('log_idea')
log.setLevel(logging.INFO)
# Create a console (stderr) handler
console_handler = logging.StreamHandler()
# Set the console handler's output format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
# Attach the console handler to the logger
log.addHandler(console_handler)


# Site base domain (all suffixes below are joined onto this)
domain = 'https://javdb368.com'
# Query-string legend: c11=year; c7=348 cracked; c7=345 uncensored leak; c10=1 has link; c10=2 has subtitles
# first = '/tags?c7=348&c10=1,2&c11=2023'
# first = '/tags?c7=345&c10=1'
first = '/actors/PRe9'
# DB session and the cookies persisted from a previous run
session = get_session()
cookie_entities = session.query(Cookie).all()


def setCookie(_jdb_session: str):
    """Overwrite the in-memory `_jdb_session` cookie with a fresh value.

    Mutates the module-level `cookie_entities` list in place; entities
    with any other name are left untouched.
    """
    for entity in cookie_entities:
        if entity.name != '_jdb_session':
            continue
        entity.value = _jdb_session


def build_cookie_str(cookies: List[dict]):
    """Render cookie dicts as a single ``name=value; name=value`` header string.

    Each dict is read via ``.get`` for its ``name`` and ``value`` keys, so
    missing keys render as ``None`` rather than raising.
    """
    pairs = []
    for cookie in cookies:
        pairs.append('{}={}'.format(cookie.get('name'), cookie.get('value')))
    return '; '.join(pairs)


def get_soup(suffix):
    """Fetch the page at ``domain + suffix`` and return it parsed as HTML.

    Sends the stored cookie entities as the request's Cookie header. If the
    response carries a new ``_jdb_session`` cookie, the in-memory cookie list
    is refreshed via setCookie() so later requests stay authenticated.

    suffix: site-relative path (e.g. '/actors/PRe9').
    Returns a BeautifulSoup of the response body.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0',
        'cookie': '; '.join('{}={}'.format(e.name, e.value) for e in cookie_entities)
    }
    url_page = get_url(suffix)
    req = urllib.request.Request(url_page, headers=headers)
    page = urllib.request.urlopen(req)
    for name, value in page.getheaders():
        # HTTP header names are case-insensitive and getheaders() preserves
        # the server's casing (usually 'Set-Cookie'); the original exact
        # match on 'set-cookie' could silently miss the session refresh.
        if name.lower() == 'set-cookie' and '_jdb_session=' in value:
            # Keep only the cookie value: strip the name and drop attributes
            # such as 'path=/' after the first '; '.
            _jdb_session = value.replace('_jdb_session=', '').split('; ')[0]
            setCookie(_jdb_session)

    return BeautifulSoup(page, 'html.parser')


def get_url(suffix):
    """Turn a site-relative *suffix* into an absolute URL on `domain`."""
    absolute_url = domain + suffix
    return absolute_url


def find_next_page(_soup: BeautifulSoup):
    """Return the pagination 'next' anchor tag, or None on the last page."""
    # class_ is Beautiful Soup's shorthand for attrs={'class': ...}
    return _soup.find('a', class_='pagination-next')


def build_row(_soup: BeautifulSoup, _rows: List[dict]):
    """Extract movie entries from one listing page and append them to _rows.

    Each appended dict has keys: 'title' (upper-cased), 'score', 'meta'
    (stripped text, used downstream as the release date), and 'href'
    (detail-page path). Titles duplicated within the same page are added
    only once; mutates _rows in place and returns None.

    NOTE(review): dedup is per page only — the same title on two different
    pages would still appear twice in _rows; downstream DB-title filtering
    presumably absorbs that. The original annotation `_rows: []` was an
    empty-list instance, not a type; fixed to List[dict].
    """
    _table = _soup.find('div', attrs={'class': 'movie-list'})
    _items = _table.find_all('div', attrs={'class': 'item'})
    uni_set = set()
    for _item in _items:
        # Detail link and display fields
        _href = _item.select_one('a')
        _title = _item.select_one('.video-title strong').text.upper()
        _score = _item.select_one('.score .value').text.strip()
        _meta = _item.select_one('.meta').text.strip()
        if _title in uni_set:
            continue  # duplicate within this page — skip
        uni_set.add(_title)
        _rows.append({'title': _title, 'score': _score, 'meta': _meta,
                      'href': _href.get('href')})


def do_it(uncensored_origin: bool):
    """Crawl all listing pages starting at `first`, then scrape each
    not-yet-stored title's detail page and persist the chosen magnet
    link as a Mate row (one commit per saved row).

    uncensored_origin: True applies the stricter "uncensored origin"
    selection (runtime window, review check to drop re-releases);
    False uses the simpler size/subtitle preference.
    """
    log.info('翻页搜集 - 开始')  # "page collection - start"
    rows = []
    # Scrape the first listing page
    soup = get_soup(first)
    page_index = 1
    log.info('当前第{}页'.format(page_index))  # "currently on page N"
    btn_next = find_next_page(soup)
    build_row(soup, rows)
    while btn_next:
        page_index += 1
        # Link to the next listing page
        next_suffix = btn_next.get('href')
        log.info('当前第{}页'.format(page_index))
        # Scrape this page's rows
        soup = get_soup(next_suffix)
        btn_next = find_next_page(soup)
        build_row(soup, rows)

    log.info('翻页搜集 - 结束')  # "page collection - end"
    log.info('！！！共收集了{}条！！！'.format(len(rows)))  # total rows collected

    # All metadata already persisted in the DB
    mates = session.query(Mate).all()
    exist_titles = set([x.name for x in mates])
    # Rows still to process: skip titles already stored
    if uncensored_origin:
        rows_deal = [x for x in rows if x['title'] not in exist_titles and is_uncensored_origin(x['title'])]
    else:
        rows_deal = [x for x in rows if x['title'] not in exist_titles]
    log.info('！！！共需处理{}条！！！'.format(len(rows_deal)))  # rows to process

    save_count = 0
    # for i, row in enumerate(reversed(rows_deal)):
    for i, row in enumerate(rows_deal):
        addr_detail = row['href']
        title = row['title']
        release_date = row['meta']
        # Scrape the detail page
        soup = get_soup(addr_detail)
        log.info('刮削---当前{}条, 标题:{}, 日期:{}, 地址:{}, 进度:{:.2f}%'
                 .format(i, title, release_date, addr_detail, i / len(rows_deal) * 100))
        # Full title as shown on the detail page
        current_title = soup.select_one('.title .current-title').text.strip()
        # Basic info: runtime in minutes (stays 0 when the page lists none)
        minute = 0
        movie_info = soup.find('nav', attrs={'class': 'movie-panel-info'})
        block_infos = movie_info.select('.panel-block')
        for block_info in block_infos:
            if '分鍾' in block_info.text:  # '分鍾' = "minutes"
                minute = int(block_info.select_one('.value').text.replace('分鍾', '').strip())

        # Magnet-link table on the detail page
        table = soup.find('div', attrs={'class': 'magnet-links'})
        items = table.find_all('div', attrs={'class': 'magnet-name'})
        # Candidate Mate entities built from the magnet list
        link_arr = []
        for item in items:
            mate = Mate(name=title, state=0, minute=minute, title=current_title)
            mate.release_date_format(release_date)
            # Original magnet title
            title_origin = item.select_one('a .name').text.strip()
            mate.suffix_format(title_origin)
            # File size (first comma-separated field of the meta text)
            item_size = item.select_one('a .meta')
            if not item_size:
                continue
            size_str: str = item_size.text.strip().split(',')[0]
            # Must be GB-sized
            if 'GB' not in size_str:
                continue
            size = float(size_str.replace('GB', ''))
            # Drop anything below 1.5 GB
            if size < 1.5:
                continue
            mate.size = size
            # Magnet URI
            mate.magnet = item.select_one('a').get('href')
            # if mate.captions and (mate.uncensored or mate.leaked or mate.uncensored_origin):  # subtitles required
            if mate.uncensored or mate.leaked or mate.uncensored_origin:  # subtitles not required
                # Keep as a candidate link
                link_arr.append(mate)
        if len(link_arr) > 0:
            mate: Mate
            if uncensored_origin:
                # ### uncensored-origin branch ###
                if '未公開' in current_title:  # '未公開' = "unreleased"
                    log.info('未公開, 本次剔除')
                    continue
                # Current runtime constraint (40-78 minutes)
                if minute > 78 or minute < 40:
                    log.info('时长不符合预期, 本次剔除')  # runtime out of range, skipped
                    continue
                caption_arr = [x for x in link_arr if 1.5 < x.size and x.captions]
                best_arr = [x for x in link_arr if 1.5 < x.size < 3.2]
                # Prefer a subtitled candidate
                if len(caption_arr) > 0:
                    mate = caption_arr[0]
                # Then one inside the preferred size window
                elif len(best_arr) > 0:
                    mate = best_arr[0]
                # Otherwise fall back to the largest file
                else:
                    mate = max(link_arr, key=lambda x: x.size)
                # Check recent reviews to exclude re-released ("rehashed") content
                soup_review = get_soup(addr_detail + '/reviews/lastest')
                reviews = soup_review.find('div', attrs={'class': 'message-body'})
                same = ['冷饭', '相同']  # review keywords meaning "rehash" / "identical"
                if len([x for x in same if x in reviews.text.strip()]) > 0:
                    log.info('检测到炒冷饭, 本次剔除')  # rehash detected, skipped
                    continue
            else:
                # ### regular branch ###
                # Use a subtitled link above 2.5 GB if present, otherwise take the largest
                caption_arr = [x for x in link_arr if x.size > 2.5 and x.captions]
                if len(caption_arr) > 0:
                    mate = caption_arr[0]
                else:
                    mate = max(link_arr, key=lambda x: x.size)

            print('保存---开始.{}'.format(mate.to_dict()))  # "save - start"
            save_count += 1
            session.add(mate)
            session.commit()
            print('保存---结束.当前入库{}条'.format(save_count))  # "save - done, N stored"
        else:
            log.info('未找到符合的磁力链接, 本次剔除')  # no suitable magnet found, skipped
    log.info('刮削结束---本次共收集{}条'.format(save_count))  # "scraping finished - N collected"


if __name__ == "__main__":
    # Run the scraper in "uncensored origin" mode when executed directly.
    do_it(True)
