# -*- coding: utf-8 -*-
import scrapy
import re
import requests
from bs4 import BeautifulSoup
import bs4
from cilibaoSpider.items import CilibaospiderItem

def getResponse(url, timeout=10):
    """Fetch *url* with the site's session cookies and browser-like headers.

    Args:
        url: Absolute URL of the page to download.
        timeout: Seconds to wait for the server to respond before raising.
            New, backward-compatible parameter — the original call had no
            timeout at all and could hang forever on a stalled connection.

    Returns:
        The response body as text (``response.text``).

    Raises:
        requests.RequestException: On connection errors or timeout.
    """
    # Session/analytics cookies captured from a browser session.
    # NOTE(review): PHPSESSID is presumably required by the site and may
    # have expired — confirm it is still valid.
    cookies = {
        'PHPSESSID': 'nlpb2mr6gtgncsi9dve479q9p5',
        'ex': '1',
        'Hm_lvt_d944abe74d06c6f8e0875fdc2b67fa90': '1593682745',
        'Hm_lpvt_d944abe74d06c6f8e0875fdc2b67fa90': '1593699817',
    }

    # Mimic a desktop Chrome browser so the site serves the normal page.
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',    
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }

    # NOTE(review): verify=False disables TLS certificate validation — kept
    # for behavior compatibility, but it exposes the request to MITM attacks.
    response = requests.get(url, headers=headers, cookies=cookies,
                            verify=False, timeout=timeout)
    # Return the fetched page body as text.
    return response.text

def myParser(item, url_response):
    """Parse a resource detail page and fill *item* in place.

    Extracts the page title from ``<h1>`` elements whose class matches
    ``res-title`` and the magnet link from the first ``<a class="download">``
    anchor.

    Args:
        item: Mapping-like item (e.g. ``CilibaospiderItem``) updated in
            place with keys ``title_text_``, ``blank_area_`` and
            ``magnet_url_``. Keys are left unset when nothing matches.
        url_response: HTML text of the detail page.
    """
    soup = BeautifulSoup(url_response, 'html.parser')
    # The class attribute is matched via a regex; when several <h1> tags
    # match, the last one wins (same behavior as the original loop).
    for title in soup.find_all('h1', re.compile('res-title')):
        if isinstance(title, bs4.element.Tag):
            item['title_text_'] = title.string
            item['blank_area_'] = '----'
    # Several anchors can match and the exact location is ambiguous, so
    # take the first one.  Guard against pages with no download link at
    # all — the original indexed [0] unconditionally and raised IndexError.
    download_links = soup.find_all('a', 'download')
    if download_links:
        magnet_url = download_links[0]
        if isinstance(magnet_url, bs4.element.Tag):
            item['magnet_url_'] = magnet_url.get('href')


# This file was generated by `scrapy genspider ciliSpider "cilibao.biz"`.
class CilispiderSpider(scrapy.Spider):
    name = 'ciliSpider'
    allowed_domains = ['cilibao.biz']
    urls_forward = 'http://cilibao.biz/s/'

    # 拼接请求的连接.
    keyworld = '996'
    urls_endof = '.html'

    # # 对资源显示进行排序，有时间，热度，大小三种，页面默认选择的是时间倒序列.
    real_url = urls_forward + keyworld + urls_endof
    start_urls = [real_url]

    def parse(self, response):
        items = []
        # 不需要构建对象直接保存，直接将获取到的连接保存到项目Download文件夹下.

        # 判断关键字到底有多少页数

        # 获取search-item链接->获取对应的磁力，磁力可以直接复制粘贴使用.

        # 判断资源链接数据是否为空.
        # for search_item in response.xpath("//*[@id="wall"]/div[1]/span[1]")
        urls_forward = 'http://cilibao.biz'
        print('*'*80)

        # 信息保存列表.
        marget_list = []

        if response.xpath('/html/body/div[1]/div[4]/div/div[1]/span[1]'):
            # 获取信息，是否查询到连接数量.
            content = response.xpath(
                '/html/body/div[1]/div[4]/div/div[1]/span[1]/text()').extract()[0]
            # 显示搜索到的资源数目
            print(content)

            # 用正则表达式 判定时候搜索资源数目不为0，继续解析.
            regex = re.compile(r'大约0条结果')
            num = regex.match(content)
            if num == None:
                # 为空，说明有连接，接下来抽取连接.
                # search-items-xpath: /html/body/div[1]/div[4]/div/div[2]/div[1]/div[1]
                for each_search_item in response.xpath('/html/body/div[1]/div[4]/div/div[2]/div[1]/div'):
                    # 解析对应的文件大小，热度等,需要文件大小大于50M，热度大于50...此处限定仅仅热度大于80.
                    # item = MyspiderItem()
                    item = CilibaospiderItem()
                    size_hit_regex = re.compile(r'\d')
                    # bar-xpath: /html/body/div[1]/div[4]/div/div[2]/div[1]/div[1]/div[3]
                    bar_size = each_search_item.xpath('div[3]/span[3]/b/text()').extract()[0]
                    
                    bar_hit = each_search_item.xpath('div[3]/span[4]/b/text()').extract()[0]

                    if int(bar_hit) < 0:
                        continue
                    """

                    # 抽取内容.
                    title_text = each_search_item.xpath(
                        'div[1]/h3/a/text()').extract()

                    # 抽取被强调的部分.
                    title_em = each_search_item.xpath(
                        'div[1]/h3/a/em/text()').extract()

                    # 合并成title.
                    title = title_text[0] + title_em[0] + title_text[1]
                    """

                    # 或者直接从连接的下一个页面直接获取完整的title也可以.

                    # 抽取href连接.
                    sub_url = each_search_item.xpath(
                        'div[1]/h3/a/@href').extract()
                    # 构建子页面的url.
                    subPage_url = urls_forward + sub_url[0]
                    
                    # 解析页面标题和磁力链接.
                    myParser(item, getResponse(subPage_url))
                    