import httplib
import os
import random
import re
import time
import urllib2

from BeautifulSoup import BeautifulSoup

from Helpers import DebugObj
import OpenAnything
from Settings import Settings


class SourceSite:
  """A web page that is fetched and scanned for links to .mp3 files."""

  def __init__(self, url):
    self.url = url
    self.domain = self.__loadDomain(url)
    self.mp3links = []  # filled in by get()

  def __loadDomain(self, url):
    """Return the host part of url.

    Accepts both http and https and does not require a path after the
    host (the old pattern raised AttributeError on e.g.
    'http://example.com' or any https URL).

    Raises ValueError if url does not look like an http(s) URL.
    """
    match = re.search(r'^https?://([^/]+)', url)
    if match is None:
      raise ValueError('Cannot extract domain from url: ' + url)
    return match.group(1)

  def get(self):
    """Fetch self.url and populate self.mp3links; logs and leaves
    mp3links unchanged on a URL error."""
    DebugObj().write('Fetching ' + self.url)
    try:
      self.content = OpenAnything.fetch(self.url)['data']
      self.mp3links = self.__parsemp3links(self.content)
    except urllib2.URLError:
      DebugObj().write("\turllib2.URLError with site " + self.url)

  def __ismp3link(self, link):
    # Case-insensitive so links ending in '.MP3' are not missed
    # (links are only lowercased later, by the crawler).
    return link.lower().endswith('.mp3')

  def __parsemp3links(self, content):
    """Return all <a href> targets in content that point at mp3 files."""
    mp3links = []
    soup = BeautifulSoup(content)
    alllinks = soup('a')
    for link in alllinks:
      # NOTE: has_key is required here — `in` on a BeautifulSoup 3 Tag
      # searches its contents, not its attributes.
      if not link.has_key('href'):
        continue
      link = link['href']
      if self.__ismp3link(link):
        mp3links.append(link)
    return mp3links


class DownloadDB:
  """Persistent set of already-downloaded links, one per line on disk.

  Keys are normalized (stripped, lowercased) both when loaded from the
  file and on every lookup/insert, so membership tests are whitespace-
  and case-insensitive. The old code only lowercased on load, so a
  contains() call with mixed case could miss an entry read from disk.
  """

  def __init__(self, filename='downloads.db'):
    self.filename = filename
    try:
      with open(self.filename) as dbfile:
        self.db = dict((self.__normalize(line), True) for line in dbfile)
    except IOError:
      # No db file yet: start empty; the file is created on first add().
      self.db = {}

  def __normalize(self, item):
    # Single normalization point shared by load, contains and add.
    return item.strip().lower()

  def contains(self, item):
    """Return True if item has already been recorded."""
    return self.__normalize(item) in self.db

  def add(self, item):
    """Record item in memory and append it to the db file.

    The normalized key is written (the old code wrote the raw item,
    which then read back differently on the next run).
    """
    key = self.__normalize(item)
    self.db[key] = True
    with open(self.filename, 'a') as dbfile:
      dbfile.write(key + '\n')


class MusicCrawler:
  """Downloads every mp3 linked from the sites listed in Settings.Sites."""

  def __loadsites(self):
    """Return SourceSite objects for the configured sites, in random order."""
    # Shuffle a copy: the old code shuffled Settings.Sites in place,
    # mutating shared module-level state.
    sites = list(Settings.Sites)
    random.shuffle(sites)
    return [SourceSite(site) for site in sites]

  def __loadFilesToGet(self, sourcesites, db):
    """Fetch every site and return a shuffled list of
    (link, filename, domain) tuples for mp3 links not already in db."""
    masterlist = []
    seen = {}
    for site in sourcesites:
      site.get()
      for link in site.mp3links:
        link = link.lower()
        if not link.startswith('http'):
          # Relative link: anchor it to the site it came from.
          link = 'http://' + site.domain + link
        if link.startswith('https://'):
          link = link.replace('https://', 'http://', 1)
        # BUG FIX: the old code tested `link in masterlist`, but
        # masterlist holds tuples, so duplicates were never filtered.
        if link in seen:
          continue
        if db.contains(link):
          continue
        seen[link] = True
        masterlist.append((link, urllib2.unquote(link.split("/")[-1]), site.domain))
    random.shuffle(masterlist)
    return masterlist

  def __getFiles(self, masterlist, db):
    """Download each masterlist entry into Settings.BasePath/<domain>/,
    recording every attempted link in db (including 404s and errors,
    so they are not retried on the next run)."""
    currentCount = 0
    for link, filename, domain in masterlist:
      parentdir = os.path.join(Settings.BasePath, domain)
      if not os.path.isdir(parentdir):
        os.mkdir(parentdir)
      if os.path.isfile(os.path.join(parentdir, filename)):
        # Avoid clobbering an existing file by prefixing a random tag.
        filename = str(random.random()) + "." + filename
      filename = os.path.join(parentdir, filename)
      DebugObj().write('Downloading ' + str(currentCount + 1) + '/' + str(len(masterlist)))
      try:
        httpdata = OpenAnything.fetch(link)
        if 'status' not in httpdata:
          DebugObj().write('\tNo status')
        elif httpdata['status'] == 404:
          pass  # dead link; still recorded below so it is skipped next run
        else:
          if httpdata['status'] not in (200, 301, 302):
            DebugObj().write('\tReceived Status ' + str(httpdata['status']))
          fmp3 = open(filename, 'wb')
          try:
            fmp3.write(httpdata['data'])
          finally:
            fmp3.close()
      except urllib2.URLError:
        DebugObj().write("\turllib2.URLError with link " + link)
      except httplib.InvalidURL:
        # Requires the module-level `import httplib`; without it the old
        # code raised NameError the first time this handler fired.
        DebugObj().write("\thttplib.InvalidURL with link " + link)
      db.add(link)
      currentCount = currentCount + 1
    DebugObj().write("Downloaded %d files" % currentCount)

  def run(self):
    """Crawl all configured sites and download any new mp3 links."""
    db = DownloadDB()
    sourcesites = self.__loadsites()
    masterlist = self.__loadFilesToGet(sourcesites, db)
    self.__getFiles(masterlist, db)


if __name__ == "__main__":
  # Time the whole crawl and report the duration in seconds.
  started = time.time()
  MusicCrawler().run()
  elapsed = time.time() - started
  DebugObj().write("Runtime: %d secs" % elapsed)