import cookielib, urllib, urllib2, threading, thread, inspect, ctypes
import time, sys
import subprocess
import re
import os, shutil
import ConfigParser
import getpass
from sql import *
from grabber.grabber import URLGrabber
from grabber import progress

def _async_raise(tid, exctype):
  """raises the exception, performs cleanup if needed"""
  if not inspect.isclass(exctype):
    raise TypeError("Only types can be raised (not instances)")
  res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
  if res == 0:
    raise ValueError("invalid thread id")
  elif res != 1:
    # """if it returns a number greater than one, you're in trouble, 
    # and you should call it again with exc=NULL to revert the effect"""
    ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
    raise SystemError("PyThreadState_SetAsyncExc failed")

class Thread(threading.Thread):
  """threading.Thread subclass that supports injecting an exception into
  the running thread (used to force a download thread to stop)."""

  def _get_my_tid(self):
    """Determine this (self's) thread id.

    Returns:
      The thread id usable with PyThreadState_SetAsyncExc.
    Raises:
      threading.ThreadError: if the thread is not running.
      AssertionError: if the id cannot be found in threading's registry.
    """
    if not self.is_alive():
      raise threading.ThreadError("the thread is not active")

    # do we have it cached?
    if hasattr(self, "_thread_id"):
      return self._thread_id

    # no, look for it in the _active dict
    for tid, tobj in threading._active.items():
      if tobj is self:
        # BUGFIX: was "sef._thread_id = tid" (NameError), and the return
        # sat outside the "if", returning the first registry entry's id
        # whether or not it belonged to this thread.
        self._thread_id = tid
        return tid

    raise AssertionError("could not determine the thread's id")

  def raise_exc(self, exctype):
    """raises the given exception type in the context of this thread"""
    _async_raise(self._get_my_tid(), exctype)

  def terminate(self):
    """raises SystemExit in the context of the given thread, which should
    cause the thread to exit silently (unless caught)"""
    self.raise_exc(SystemExit)



class redirectManager(urllib2.HTTPRedirectHandler):
    """Redirect handler that remembers where a request was redirected to.

    Used to manage the redirects we can find when the user selected direct
    downloads at Megaupload: after a request goes through an opener built
    with this handler, ``redirect`` holds the final URL and ``headers``
    holds the headers of the last 302 hop (empty string until then).
    """

    def __init__(self):
        self.headers = ""
        self.redirect = ""

    def http_error_301(self, req, fp, code, msg, headers):
        """Record the target URL of a permanent (301) redirect."""
        response = urllib2.HTTPRedirectHandler.http_error_301(
            self, req, fp, code, msg, headers)
        response.status = code
        self.redirect = response.geturl()
        return response

    def http_error_302(self, req, fp, code, msg, headers):
        """Record the target URL and headers of a temporary (302) redirect."""
        response = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        response.status = code
        self.redirect = response.geturl()
        self.headers = headers
        return response


class Megaupload():
#  def __init__(self):
#    """ Inicializamos la matriz global """

    # Get the login info from the config file, or ask for it
#    if os.environ.has_key("APPDATA") and os.path.exists(os.environ["APPDATA"]):
#      path = os.environ["APPDATA"] + "/megaupload-dl.ini"
#    else:
#      path = os.path.expanduser("~") + "/.megaupload-dl"

#    cfg = ConfigParser.SafeConfigParser()
#    cfg.readfp(file(path))
#    user = cfg.get("Login", "user")
#    password = cfg.get("Login", "password")
  
  
  def log(self, text):
    """Prints a message with the local time and date."""
    date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    print "[%s] - %s" % (date, text)
    db = SQL()
    db.insertlog(text)
    db.close()
    
  def from_megaupload(self, url):
    """Checks if this is a megaupload link"""
    return (url.startswith("megaupload.com") or 
            url.startswith("www.megaupload.com") or 
            url.startswith("http://megaupload.com") or
            url.startswith("http://www.megaupload.com"))

  def set_access_cookie(self):
    """ Checks user and pass and saves the cookie
      Return 1: Password error or error login
      Return 0: OK 
    """
    db = SQL()
    try:
      user = db.getparameter('mu_user')
      password = db.getparameter('mu_pwd')
    except:
      return 1
    cred = urllib.urlencode({"login": "1","next": "c%3Dlogin", "username": user, "password": password})
    req2 = urllib2.Request("http://megaupload.com/?c=login",cred)
    gr2 = redirectManager()
    opener = urllib2.build_opener(gr2)
    source = opener.open(req2)
    cookie = gr2.headers.get("set-cookie", "")
    if cookie:
      (cookie,_) = cookie.split(":",1)
      self.log("MEGAUPLOAD: Logged in as %s. Cookie saved" % user)
    else:
      self.log("MEGAUPLOAD: Invalid user name or password")
      db.close()
      return 1
    db.setparameter('mu_cookie',cookie)
    db.close()
    return

  def quit(self):
    Thread.terminate()
    
  def download(self, url, queue, check):
    """
      url: Link from megaupload
      queue: Global diccionary with info about bandwith
      check: 1- We ONLY check the links  2.- Download the links.
    """
    db = SQL()
    try:
      cookie = db.getparameter('mu_cookie')
    except:
      self.log("MEGAUPLOAD: Paused: %s . User and pass not valid." % url)
      db.pauselink(url)
      db.close()
      return 0
    if not check:
      db.locklink(url)
    db.close()
    req = urllib2.Request(url)
    req.add_header("Cookie", cookie)
    req.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.6) Gecko/20061201 Firefox/2.0.0.8 (Archlinux)")
    gr = redirectManager()
    opener = urllib2.build_opener(gr)
    source = opener.open(req)

    if gr.redirect and url != gr.redirect:
      # If this is a redirect, the user selected Direct Downloads at Megaupload's preferences
      # so we can download the file already
      real_url = gr.redirect
      print gr.headers 
    else:
      # If this is not a redirect, we have to look for the direct download link
      source = source.read()
      prefixes = re.findall("document.getElementById\(\"download_html\"\).innerHTML = '<a href=\"http://www(\d*).*</a>", source)
      if prefixes:
        prefix = prefixes[-1]
      else:
        self.log("MEGAUPLOAD: This file cannot be downloaded: %s" % url)
        db = SQL()
        db.failed(url)
        db.close()
        return 0

      real_url = re.findall("document.getElementById\(\"downloadhtml\"\).innerHTML = '<a href=\"http://www\d*\.\.?(.*)\" class=\"downloadlink\"", source)[0]
      real_url = "www" + prefix + "." + real_url
      real_url = urllib.quote(real_url)

    # Checking link only?
    if check == 1:
      db = SQL()
      (_,name) = real_url.rsplit("/",1)
      db.updatelinkname(url,name)
      db.close()
      return 1
      
    # We continue with download
    print ("Real url: %s" % real_url)
    p = progress.text_progress_meter()
    num = thread.get_ident()
    queue[num] = { "progress": p, "object": self, "url": url, "error": "" }
    db = SQL()
#    print ("Parameter: %s" %db.getparameter("dest_path") )
    g = URLGrabber(reget='simple', progress_obj=p)
    ret = g.urlgrab(real_url)
    if ret != 0:
      if(os.path.isfile(db.getparameter("dest_path") + "/" + ret)):
         os.rename(db.getparameter("dest_path") + "/" + ret, db.getparameter("dest_path") + "/old_" + ret)
      shutil.move(ret,db.getparameter("dest_path"))
      queue[num]["error"] = "Moving file"
      db.downloaddone(url)
      db.close()
    else:
      db.failed(url)
      db.close()
      self.log("MEGAUPLOAD: Wget failed")
    del queue[thread.get_ident()]
    return
 
