import wx
import re
import binascii
import sys

from threading import Thread

from helpers import get_metainfo


################################################################

class ScrapeThread(Thread):
    """
    Background thread that retrieves scrape statistics (seed/peer counts)
    for a single torrent from its tracker.

    Results are stored in ``self.currentseed`` / ``self.currentpeer`` and
    pushed back to the torrent object on the GUI thread via wx.CallAfter.
    """
    def __init__(self, torrent, manualscrape = False):
        """
        torrent      -- torrent object providing getTracker(), infohash and
                        updateScrapeData(peers, seeds, status)
        manualscrape -- True when the user explicitly requested the scrape;
                        only then is the status text propagated to the UI
        """
        Thread.__init__(self, None, None, None)
        # Daemon thread: must not keep the application alive on exit.
        # (attribute form replaces the deprecated setDaemon()).
        self.daemon = True
        self.torrent = torrent
        self.manualscrape = manualscrape
        self.status = _('Scraping')
        # '?' is the placeholder shown until real data arrives (or on error).
        self.currentseed = "?"
        self.currentpeer = "?"

    def run(self):
        self.GetScrapeData()

    def GetScrapeData(self):
        """
        Contact the tracker's scrape URL and store the resulting peer and
        seed counts in self.currentpeer / self.currentseed.
        On any failure the '?' placeholders are kept and only self.status
        is updated.
        """
        # Push the initial 'Scraping' status to the UI right away.
        self._updateTorrent()

        announce = self.torrent.getTracker()
        if not announce:
            self.status = _('Scraping not supported')
            self._updateTorrent()
            return

        # By scrape convention a tracker supports scraping only when the
        # last path component of the announce URL begins with "announce".
        ix = announce.rfind('/')
        if ix == -1 or announce.rfind("/announce") != ix:
            # Tracker doesn't support scraping.
            self.status = _('Scraping not supported')
            self._updateTorrent()
            return

        # Everything up to and including the final '/'.
        surl = re.sub('(.*/)[^/]+', r'\1', announce)

        # Index just past the "announce" token ("/announce" is 9 chars).
        ix2 = min(ix + 9, len(announce))

        if announce[ix + 1:ix2].endswith("announce"):
            # Swap "announce" for "scrape", keeping any trailing suffix
            # (e.g. a passkey).  Some private trackers already carry a
            # query string there, so join with '&' instead of '?'.
            if '?' in announce[ix2:]:
                infohashprefix = '&'
            else:
                infohashprefix = '?'
            surl = surl + 'scrape' + announce[ix2:] + infohashprefix + 'info_hash='

        # Percent-encode the hex infohash: one '%' per byte, i.e. per pair
        # of hex digits (an odd trailing digit is passed through as-is,
        # matching the historical behaviour).
        infohash = self.torrent.infohash
        surl += ''.join('%' + infohash[i:i + 2]
                        for i in range(0, len(infohash), 2))

        # Fetch and bdecode the scrape response from the tracker.
        scrapedata = get_metainfo(surl, style = "url")

        if scrapedata is None or 'files' not in scrapedata:
            self.status = _("Can't get scrape data from tracker")
        else:
            # 'files' maps raw 20-byte infohashes to their statistics.
            # NOTE(review): b2a_hex(key) == infohash presumes infohash is a
            # hex string of matching type (str under Py2) — verify if this
            # code is ever ported to Python 3, where b2a_hex returns bytes.
            files = scrapedata['files']
            for key in files.keys():
                if binascii.b2a_hex(key) == self.torrent.infohash:
                    self.currentpeer = str(files[key]['incomplete'])
                    self.currentseed = str(files[key]['complete'])
                    self.status = _('Scraping Done')

        self._updateTorrent()

    def _updateTorrent(self):
        # Marshal the update onto the GUI thread: wx widgets may only be
        # touched from the main thread.
        wx.CallAfter(self.__updateTorrent)

    def __updateTorrent(self):
        if not self.manualscrape:
            # Automatic scrapes must not overwrite the visible status text.
            status = ""
        else:
            status = self.status

        # Hand the (possibly placeholder) counts back to the torrent object.
        self.torrent.updateScrapeData(self.currentpeer, self.currentseed, status)
