# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from scrapy.contrib.loader import ItemLoader
#from scrapy.contrib.spiders import CrawlSpider
from torrent_crawler.items import Torrent
from urllib import quote

import re

# Browse pages crawled when no explicit search query is given: category
# ids 200-209 plus the catch-all 299 (presumably the 2xx family of
# categories on the site -- TODO confirm against the site's category map).
DEFAULT_CATEGORY_URLS = tuple(
    'https://www.thepiratebay.se/browse/%d' % category_id
    for category_id in list(range(200, 210)) + [299]
)


class ThepiratebaySpider(scrapy.Spider):
    """Spider that scrapes magnet links from The Pirate Bay.

    With a ``search`` argument it crawls the search-result pages for that
    query; without one it walks every URL in ``DEFAULT_CATEGORY_URLS``.
    Pagination is followed page by page until the site signals the end of
    the listing (error text or a page with no magnet links).
    """

    name = "thepiratebay"
    # NOTE(review): allowed_domains lists the proxy host while
    # DEFAULT_CATEGORY_URLS point at thepiratebay.se -- with the offsite
    # middleware enabled the category crawl may be filtered; confirm intent.
    allowed_domains = ["pirateproxy.sx"]
    crawler = None

    # Single source of truth for the magnet-link selector (previously
    # duplicated in parse() and schedule_next_page()).
    _MAGNET_XPATH = '//a[starts-with(@href, "magnet")]/@href'

    def set_crawler(self, c):
        # Keep a reference so parse() can increment crawler stats.
        self.crawler = c

    def __init__(self, search=None, *args, **kwargs):
        """Build start_urls from *search* (if given) or the default
        category list.

        :param search: optional query string; it is URL-quoted and
            inserted into a ``/search/<query>/0/99/200`` URL.
        """
        super(ThepiratebaySpider, self).__init__(*args, **kwargs)

        if search:
            self.start_urls = [
                'https://%s/search/%s/0/99/200' % (self.allowed_domains[0],
                                                   quote(search))
            ]
        else:
            self.start_urls = DEFAULT_CATEGORY_URLS

    def parse(self, response):
        """Yield one Torrent item per magnet link on the page, then the
        Request for the next listing page (if any)."""
        # The site intermittently serves a maintenance page; record the
        # failure and retry the same URL (dont_filter bypasses dedup).
        if 'Database maintenance' in response.body:
            self.crawler.stats.inc_value('pb_error')
            yield Request(response.url, dont_filter=True)
            return

        for magnet_link in response.xpath(self._MAGNET_XPATH).extract():
            loader = ItemLoader(Torrent())
            # name and info_hash are extracted from the magnet URI by the
            # item's input processors -- presumably; verify in items.py.
            loader.add_value('name', magnet_link)
            loader.add_value('info_hash', magnet_link)
            loader.add_value('magnet_link', magnet_link)
            loader.add_value('origin', response.url)
            yield loader.load_item()

        next_request = self.schedule_next_page(response)
        if next_request:
            self.log("Yield: %s" % next_request)
            yield next_request

    def schedule_next_page(self, response):
        """Return a Request for the page after *response*, or None when
        the listing is exhausted (error text, 'No hits', or a page with
        no magnet links at all)."""
        if 'Disabled browsing beyond page' in response.body or \
                'Hm. Problem! Please retry.' in response.body or \
                'No hits' in response.body or \
                not response.xpath(self._MAGNET_XPATH).extract():
            return None

        if '/browse/' in response.url:
            # Browse URLs: <base>/browse/<cat>[/<page>/<sort>].
            # The prefix group must be NON-greedy: a greedy (.+) would,
            # on ".../browse/200/1/0", swallow ".../browse/200/1" as the
            # base and emit the malformed ".../browse/200/1/0/1/0".
            m = re.search(r'(.+?)/(\d+)(?:/(\d+)/(\d+))?$', response.url)
            if m:
                base, category, page, sort_order = m.groups('')
                return Request('%s/%s/%s/%s' % (
                    base,
                    category,
                    int(page or 0) + 1,   # first page has no suffix -> page 0
                    int(sort_order or 0),
                ))

        elif '/search' in response.url:
            # Search URLs: <base>/search/<query>/<page>/<per_page>/<cat>.
            m = re.search(r'(.+?)/(\d+)(?:/(\d+)/(\d+))?$', response.url)
            if m:
                base, page, per_page, category = m.groups('')
                next_url = '%s/%s/%s/%s' % (
                    base, int(page or 0) + 1, per_page, category)
                self.log("Next: %s" % next_url)
                return Request(next_url)

        return None
