# -*- coding: utf-8 -*-
import re
import requests
from bs4 import BeautifulSoup
import bs4

"""
需要自己设置关键词.
将获取到的磁力保存到文本文档中，限定磁力热度15+
"""


def getResponse(url, timeout=10):
    """Fetch *url* and return the response body as text.

    Args:
        url: Page address to request.
        timeout: Seconds to wait for the server before giving up.
            Added so one unresponsive host cannot hang the whole crawl
            forever (requests has no default timeout).

    Returns:
        str: The decoded HTML of the response.

    Raises:
        requests.RequestException: On connection failure or timeout.
    """
    # NOTE: session cookies expire — refresh them when requests start
    # coming back empty or redirected.
    cookies = {
        'PHPSESSID': 'nlpb2mr6gtgncsi9dve479q9p5',
        'ex': '1',
        'Hm_lvt_d944abe74d06c6f8e0875fdc2b67fa90': '1593682745',
        'Hm_lpvt_d944abe74d06c6f8e0875fdc2b67fa90': '1593699817',
    }

    # Browser-like headers so the site serves the normal HTML pages.
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }

    # verify=False disables TLS certificate checks; tolerable here only
    # because the target site is plain http anyway — do not copy this
    # pattern to code that talks to https endpoints you care about.
    response = requests.get(url, headers=headers, cookies=cookies,
                            verify=False, timeout=timeout)
    # Return the fetched page as text.
    return response.text


def subParser(magnet_title_list, url_response):
    """Extract the magnet link and title from one detail page and append
    '<magnet>----<title>\\n' to *magnet_title_list* (mutated in place).

    Args:
        magnet_title_list: Accumulator list of result lines.
        url_response: HTML text of a resource detail page.
    """
    title_text = ''
    magnet_text = ''
    soup = BeautifulSoup(url_response, 'html.parser')
    # Several <h1> tags may match; the last non-empty one wins, as in
    # the original scraper.
    for title in soup.find_all('h1', re.compile('res-title')):
        # BUGFIX: title.string is None for nested markup; keep '' rather
        # than None so the concatenation below cannot raise TypeError.
        if isinstance(title, bs4.element.Tag) and title.string:
            title_text = title.string
    # Several download anchors can match; only the first is reliable.
    # (A regex over the raw HTML would work too.)
    anchors = soup.find_all('a', 'download')
    # BUGFIX: guard against pages with no download link instead of
    # raising IndexError and aborting the whole crawl.
    if anchors and isinstance(anchors[0], bs4.element.Tag):
        magnet_text = anchors[0].get('href') or ''
    magnet_title_list.append(magnet_text + '----' + title_text + '\n')


def mainParser(url_response):
    """Parse one search-result page and return detail-page URLs for
    resources whose "heat" count is at least 15.

    Args:
        url_response: HTML text of a search-result page.

    Returns:
        list[str]: Absolute detail-page URLs, each ending in '\\n'.
    """
    total_list = []
    # Hoist loop-invariant compiled patterns out of the item loop.
    # +? makes the match non-greedy (shortest <b>…</b> span).
    hit_regex = re.compile(r'<b>\d+?</b>')
    num_regex = re.compile(r'\d+')
    soup = BeautifulSoup(url_response, 'html.parser')
    for search_item in soup.find_all('div', 'search-item'):
        bars = search_item.find_all('div', 'item-bar')
        if not bars:
            continue  # malformed item: no stats bar at all
        hit_match = hit_regex.search(str(bars[0]))
        if hit_match is None:
            # BUGFIX: a missing <b>N</b> used to raise AttributeError
            # on .group(0) and kill the crawl; skip the item instead.
            continue
        num = num_regex.search(hit_match.group(0)).group(0)
        # Heat filter: resources below 15 are skipped.
        if int(num) < 15:
            continue
        anchor = search_item.find('a')
        if anchor is None:
            continue  # no link to the detail page
        total_list.append('http://cilibao.biz' + anchor.get('href') + '\n')
    return total_list


# Module smoke test (mainParser takes one argument and returns the list):
# url_response = getResponse('http://cilibao.biz/s/996.html')
# total_list = mainParser(url_response)


def getPages(url_response):
    """Return the number of result pages for a search, defaulting to 1.

    The page count is read from the "last page" pagination link
    (li.last_p), whose href ends in ..._rel_<N>.html.

    Args:
        url_response: HTML text of the first search-result page.

    Returns:
        int: The total page count, or 1 when there is no pagination
        or the markup cannot be parsed.
    """
    soupPage = BeautifulSoup(url_response, 'html.parser')
    last_items = soupPage.find_all('li', 'last_p')
    if not last_items:
        return 1  # no pagination block: single page of results

    # BUGFIX: the dot is now escaped — r'\d+.html' would also match
    # strings like '12xhtml'. The capture group grabs the page number
    # in one pass instead of two chained searches.
    page_match = re.search(r'(\d+)\.html', str(last_items[0]))
    if page_match is None:
        # Markup changed under us: fall back to one page rather than
        # crashing with AttributeError on .group().
        return 1
    return int(page_match.group(1))


def buildPagesList(urls_forward, keyworld, page_nums):
    """Build the list of paginated search URLs for a keyword.

    The site paginates results as <prefix><keyword>_rel_<page>.html,
    e.g. http://cilibao.biz/s/test_rel_3.html.

    Args:
        urls_forward: Base URL prefix (e.g. 'http://cilibao.biz/s/').
        keyworld: Search keyword used in the URL.
        page_nums: Total number of result pages.

    Returns:
        list[str]: One URL per page, pages 1 through page_nums.
    """
    return [
        '{}{}_rel_{}.html'.format(urls_forward, keyworld, page)
        for page in range(1, page_nums + 1)
    ]


def main():
    """Crawl all search-result pages for the configured keyword and
    save 'magnet----title' lines (heat >= 15) to keyworld.txt.

    Returns:
        'error' when writing the output file fails, otherwise None.
    """
    # Base URL for the site's search endpoint.
    urls_forward = 'http://cilibao.biz/s/'

    # Keyword to search for — edit this to change the query.
    keyworld = 'makemodel'
    urls_endof = '.html'
    magnet_title_list = []

    # First request hits the default listing; the site sorts by time
    # (newest first) unless told otherwise.
    real_url = urls_forward + keyworld + urls_endof

    url_response = getResponse(real_url)
    main_soup = BeautifulSoup(url_response, 'html.parser')
    if main_soup.find_all('div', 'search-item'):
        # Non-empty: the search actually returned a result list.

        # Total page count (int), then one URL per page.
        page_nums = getPages(url_response)
        PagesList = buildPagesList(urls_forward, keyworld, page_nums)

        for page_url in PagesList:
            print("总页数为:{},  当前连接页为:{}".format(page_nums, page_url))
            # Each detail URL on this page yields one magnet line.
            for detail_url in mainParser(getResponse(page_url)):
                subParser(magnet_title_list, getResponse(detail_url))

    try:
        # BUGFIX: explicit utf-8 so Chinese titles cannot crash the
        # write on platforms whose default encoding is not UTF-8
        # (e.g. GBK on Chinese-locale Windows).
        with open('keyworld.txt', "w", encoding='utf-8') as fo:
            print("保存URL到文件:", fo.name)
            fo.writelines(magnet_title_list)
    except OSError:
        # BUGFIX: was a bare `except:` that swallowed everything,
        # including KeyboardInterrupt; only I/O failures belong here.
        print("write file failed!")
        return "error"


if __name__ == "__main__":
    main()
