#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import sys
#import threading
import re
#import pycurl
import urllib
import urllib2
#import base64
#from Cookie import SimpleCookie
import cookielib, os.path
from dumbmenu import *

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

try:
    import signal
    from signal import SIGPIPE, SIG_IGN
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
    pass

# File where SimpleUrl persists its LWP-format cookie jar between runs.
COOKIEFILE = 'cookies.lwp'

# Map of HTTP status code -> (short name, long description).
# NOTE(review): this mirrors BaseHTTPServer.BaseHTTPRequestHandler.responses
# and appears unused anywhere in this file -- candidate for removal.
responses = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),

    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),

    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),

    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this server.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),

    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
    }
    
class SimpleCurl:
    """pycurl-based HTTP fetcher with cookie persistence and a progress hook.

    NOTE(review): the ``import pycurl`` line at the top of this file is
    commented out, so constructing this class currently raises NameError
    at runtime.  SimpleUrl below appears to have superseded it; either
    restore the import or drop this class.
    """
    def __init__(self,url):
        # round: download-progress fraction, updated by progress()
        self.round = 0.0
        self.curl = None
        self.data = None
        self.url = url
        self.closed= False
        self.reinit()

    def reinit(self):
        """(Re)create the curl handle; needed because __perform() closes it."""
        if self.curl and not self.closed:
            self.curl.close()
        self.closed= False
        self.curl = pycurl.Curl()
        self.curl.setopt(pycurl.VERBOSE, 0)
        print "GOTO:",self.url
        self.curl.setopt(pycurl.URL, self.url)
        #self.curl.setopt(pycurl.WRITEDATA, self.target_file)
        self.curl.setopt(pycurl.FOLLOWLOCATION, 1)
        self.curl.setopt(pycurl.COOKIEFILE, "./cookies.txt")
        self.curl.setopt(pycurl.COOKIEJAR, "./cookies.txt")
        self.curl.setopt(pycurl.NOPROGRESS, 0)
        self.curl.setopt(pycurl.PROGRESSFUNCTION, self.progress)
        self.curl.setopt(pycurl.MAXREDIRS, 25)
        # NOTE(review): VERIFYHOST=2 with VERIFYPEER=0 checks the hostname
        # but not the certificate chain -- effectively no TLS validation.
        self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)
        self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)
        self.curl.setopt(pycurl.NOSIGNAL, 1)
        # Response bytes accumulate in this in-memory buffer.
        self.data = StringIO()
        self.curl.setopt(pycurl.WRITEFUNCTION, self.data.write)


    def progress(self, download_t, download_d, upload_t, upload_d):
        """libcurl progress callback: track the completed fraction.

        When the total size is unknown (download_t == 0) the value just
        cycles 0.0 -> 0.9 as a crude activity spinner.
        """
        if download_t == 0:
            self.round = self.round + 0.1
            if self.round >= 1.0:  self.round = 0.0
        else:
            self.round = float(download_d) / float(download_t)
        print self.round

    def __perform(self):
        # Run the transfer, then close the handle; reinit() must be called
        # (directly or via seturl) before the next transfer.
        if self.closed:
            self.reinit()
        self.curl.perform()    
        self.curl.close()
        self.closed = True

    def getresponse(self):
        """Return everything the last transfer wrote into the buffer."""
        #self.data.seek(0)
        return self.data.getvalue()

    def seturl(self,url):
        """Point the fetcher at a new URL (rebuilds the curl handle)."""
        self.url = url
        self.reinit()

    def get(self):
        """Run the pending transfer and return the response body."""
        self.__perform()
        return self.getresponse()

    def postform(self,formdata): # formdata = {'field1': 'value1'}
        """POST URL-encoded *formdata* and return the response body."""
        post = urllib.urlencode(formdata)
        print post
        self.curl.setopt(pycurl.POST, 1)
        self.curl.setopt(pycurl.POSTFIELDS, post)
        return self.get()
        
class SimpleUrl:
    """urllib2-based HTTP client with persistent LWP cookies.

    Cookies are loaded from COOKIEFILE when present and saved back after
    every page fetch, so a login session survives between runs.
    """

    def __init__(self, url=None):
        self.req = None       # last urllib2.Request built
        self.url = url
        self.html = None      # body of the last HTML fetch
        self.resp = None      # last response object
        self.headers = {}     # extra headers added via add_header()
        self.cookies = cookielib.LWPCookieJar()
        if os.path.isfile(COOKIEFILE):
            self.cookies.load(COOKIEFILE)
            print('Reusing old cookies')

        # Install a global opener so every urlopen() in this process
        # shares the same cookie jar.
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookies))
        urllib2.install_opener(opener)

    def add_data(self, data):
        """URL-encode *data* (a dict) and attach it as the POST body."""
        self.req.add_data(urllib.urlencode(data))

    def postform(self, url, data=None, use_cookies=True):
        """POST *data* (already URL-encoded) to *url* and return the HTML."""
        self.url = url
        self.req = urllib2.Request(self.url, data, headers=self.headers)
        if use_cookies:
            self.set_cookies()
        return self.__get_html()

    def add_header(self, key, val):
        """Remember a header to send with every subsequent request."""
        self.headers[key] = val

    def downloadfile(self, url, file):
        """Fetch *url* and write the raw response body to *file*.

        Opens the output in binary mode ('wb') so binary payloads are not
        corrupted by newline translation, and always closes the handle.
        """
        print("Downloading %s" % url)
        self.req = urllib2.Request(url, headers=self.headers)
        self.set_cookies()
        self.resp = urllib2.urlopen(self.req)
        f = open(file, 'wb')  # 'wb': downloads are arbitrary binary data
        try:
            f.write(self.resp.read())
        finally:
            f.close()
        print("Done")

    def set_cookies(self):
        """Deliberate no-op: cookies are handled by the installed opener."""
        return

    def get_cookies(self):
        """Return the shared LWPCookieJar."""
        return self.cookies

    def geturl(self):
        """Return the final (post-redirect) URL of the last response, or None."""
        if self.resp:
            return self.resp.geturl()
        return None

    def get(self, url, use_cookies=True):
        """GET *url* and return its HTML body."""
        self.url = url
        print("Getting %s" % url)
        self.req = urllib2.Request(self.url, headers=self.headers)
        if use_cookies:
            self.set_cookies()
        return self.__get_html()

    def __get_html(self):
        """Open the prepared request, save cookies, and return the body."""
        self.resp = urllib2.urlopen(self.req)
        self.html = self.resp.read()
        print("Done")
        # Persist the session so the next run can reuse it.
        self.cookies.save(COOKIEFILE)
        return self.html

    def set_url(self, newurl):
        self.url = newurl

    def getresponse(self):
        """Return the HTML body of the last fetch (None before any request)."""
        return self.html
    
class LiveLogin:
    """Scripted Windows Live (login.live.com) sign-in.

    Drives the JavaScript-free login flow: scrapes the hidden form fields
    and the form action from the login page, posts them back with the
    credentials, and re-posts once more if the "JavaScript required"
    interstitial appears.  Transport is a SimpleUrl instance.
    """
    LOGIN = "http://login.live.com"
    POSTURL = ""
    # Phrase on the interstitial page that a JS-enabled browser would
    # auto-submit; its presence means we must re-post the scraped form.
    REDIRECT_PHRASE = 'JavaScript required to sign in'
    # Scrape <input name="..." value="..."> pairs and the form action URL.
    inputs = re.compile(r'input.*?name="(?P<name>[^"]+).*?value="(?P<value>[^"]+)',re.I|re.M)
    action = re.compile(r'action="(?P<action>[^"]+)',re.I|re.M)
    params = { 'login':'', 'passwd':'','type':1 }
    curl = None
    html = None
    loggedin = False

    def __init__(self, user, passwd):
        """Log in as *user*/*passwd*, following one JS redirect if needed."""
        # Start fresh
        self.login()
        self.parsehtml()

        self.params['login'] = user
        self.params['passwd'] = passwd
        self.postlogin()

        # str.find returns -1 when absent; "!= -1" also catches a match at
        # index 0, which the previous "> 0" test silently missed.
        if self.html.find(self.REDIRECT_PHRASE) != -1:
            print("Account JS redirect")
            self.params = {}
            self.parsehtml()
            self.postlogin()
            if self.html.find('Account - Windows Live') != -1:
                self.loggedin = True
                print("Logged in ,i think")

    def login(self):
        """GET the login page (cookies enabled) and store its HTML."""
        print("Logging in...")
        self.curl = SimpleUrl()
        self.html = self.curl.get(self.LOGIN, True)

    def postlogin(self):
        """POST the scraped form fields to the scraped action URL."""
        print("Posting data...")
        self.html = self.curl.postform(self.POSTURL, urllib.urlencode(self.params))

    def getsock(self):
        """Return the underlying SimpleUrl transport."""
        return self.curl

    def get(self, url):
        """GET *url* through the shared session; store and return the HTML."""
        self.html = self.curl.get(url)
        return self.html

    def gethtml(self):
        """Return the HTML of the last fetch."""
        return self.html

    def parsehtml(self):
        """Scrape the form action into POSTURL and input fields into params."""
        # When several actions are present the last one wins (unchanged
        # from the original behaviour).
        for m in self.action.finditer(self.html):
            self.POSTURL = m.group('action')

        for m in self.inputs.finditer(self.html):
            self.params[m.group('name')] = m.group('value')

    def logto(self, url):
        """Visit *url*; re-post the login form if the JS interstitial appears.

        Returns True when a re-post was performed, False otherwise.
        """
        self.get(url)
        if self.html.find(self.REDIRECT_PHRASE) != -1:
            self.params = {}
            self.parsehtml()
            print("Logged to " + url)
            self.postlogin()
            self.dump()
            return True
        return False

    def dump(self):
        """Write the last fetched HTML to dump.htm for offline inspection."""
        f = open('dump.htm', 'w')
        try:
            f.write(self.html)
        finally:
            f.close()

class SkyDrive:
    """SkyDrive browser built on top of LiveLogin.

    After logging in it extracts the per-account CID host from the final
    URL, fetches the home page, and scrapes folder/file links for the
    interactive loop().
    """
    live = None
    CID = "https://"
    HOME = "skydrive.live.com/home.aspx"
    BROWSE = "skydrive.live.com/browse.aspx"
    UPLOAD = "skydrive.live.com/upload.aspx"
    NEWFOLDER = "skydrive.live.com/newlivefolder.aspx?ct=skydrive"
    links = []
    # Folder tree links, JS file descriptors, and the _download URL.
    retvLinks = re.compile(r'a.*?class="tvLink".*?title="(?P<title>[^"]+).*?href="(?P<url>[^"]+)',re.I|re.M)
    reFiles = re.compile(r'displayName:\s*\'(?P<name>[^\']+).*?url:\s*\'(?P<url>[^\']+)',re.I|re.M)
    reDown = re.compile(r"_download\s*=\s*'(?P<url>[^']+)",re.I|re.M)

    def __init__(self, user, passwd):
        """Log in, resolve the account's CID host, and parse the home page."""
        self.live = LiveLogin(user, passwd)
        if self.live.loggedin and self.live.logto("http://skydrive.live.com"):
            print("SkyDrive login successful")
            print(self.live.curl.geturl())
            # The final URL looks like https://cidXXXX.skydrive...; pull
            # out the cid host so later requests hit the right server.
            m = re.match(r'.*?(?P<CID>cid.*?)\.skydrive', self.live.curl.geturl())
            if m:
                self.CID = "https://%s." % m.group("CID")
                print("CID %s" % m.group("CID"))
        else:
            raise Exception("Failed to login")

        self.live.get(self.CID + self.HOME)
        self.live.dump()
        self.parseLinks()

    def parseLinks(self):
        """Rebuild self.links from the HTML of the last fetched page.

        Each entry is [title, url, kind]: kind 0 = navigable page,
        kind 1 = downloadable file.
        """
        del self.links[:]
        self.links.append(['Home', self.CID + self.HOME, 0])

        html = self.live.gethtml()
        for m in self.retvLinks.finditer(html):
            self.links.append([m.group('title'), m.group('url'), 0])

        # Hoisted out of the loop: the search result is the same for every
        # iteration.  NOTE(review): this means every file entry shares one
        # _download link (preserved from the original) -- verify this is
        # right for folders with several files.
        link = self.reDown.search(html)
        for m in self.reFiles.finditer(html):
            if link:
                url = link.group('url')
                self.links.append([m.group('name'), url, 1])
            else:
                url = m.group('url')
                self.links.append([m.group('name'), url, 0])
            print("%s %s" % (m.group('name'), url))

    def newfolder(self, name):
        """TODO(review): incomplete -- builds form fields but never posts them."""
        params = {}
        params["PC_ConnectionsRoleSelect"] = "Contributor"  # or "Member"
        params["PC_ConnectionsFOF"] = 1

    def unescape(self, url):
        """Decode JavaScript-style \\xHH escapes embedded in *url*."""
        hex_escape = re.compile(r'\\x(?P<hex>.{2})', re.I)
        return hex_escape.sub(lambda m: chr(int(m.group('hex'), 16)), url)

    def loop(self):
        """Interactive debug loop: browse folders / download files via dumbmenu."""
        while 1:
            try:
                i = dumbmenu("Select item", [x for x, y, z in self.links])
                if i == -1:
                    break
                item = self.links[i]
                if item[2] == 0:
                    # Folder / page: navigate into it and re-scrape.
                    self.live.get(item[1])
                    self.live.dump()
                    self.parseLinks()
                if item[2] == 1:
                    i = dumbmenu("Download this file", ['Yes', 'No'])
                    if i == 0:
                        # The embedded URL carries \xHH escapes; decode first.
                        url = self.unescape(item[1])
                        self.live.getsock().downloadfile(url, url.split('/')[-1].replace('?download', ''))
            except urllib2.HTTPError as e:
                # e.message is deprecated and usually empty; print the error itself.
                print(e)
    
    
if __name__ == "__main__":
    sky = SkyDrive("grepmoo@gmail.com","XXXXXX")
    sky.loop()
