#!/usr/bin/python
# -*- coding: utf-8 -*-


# author: hitmoon <zxq_yx_007@163.com>
# date: 2013-09-17
# version: 1.0

import os.path
import os
import urllib
import urllib2
import cookielib

import re
import sys
import zipfile
import time
import random
import httplib

#from bs4 import BeautifulSoup as BS
from optparse import OptionParser


def get_string_from_file(fname):
    """Return the entire contents of the text file `fname` as one string.

    The original implementation leaked the file handle (no close) and
    rebuilt the string line by line (quadratic concatenation).  Reading
    the file once inside a `with` block returns the identical result and
    guarantees the handle is closed even on error.
    """
    with open(fname, 'r') as f:
        return f.read()



# Regexes applied to the scraped baidu-music HTML:
#   re_music  -> (relative song url, song title) pairs from a listing page
#   re_author -> the artist string from the "author_list" span
#   re_url    -> (download link, id) pairs from a song's download page;
#                get_loc() treats the second group as a numeric bitrate
re_music = re.compile(r'<span class="song-title\s*"[^>]+>\s*<a href="([^>"]+)"[^>]+>([^<]*)</a>', re.UNICODE)
re_author = re.compile(r'<span class="author_list"\s*title="([^>]+)">', re.UNICODE)
re_url =re.compile(r'<a data-btndata=\'[^\']+\'\s*href="([^"]+)"\s*id="([^"]+)"[^>]+>', re.UNICODE)


# Filename template; only referenced by the commented-out archiving code in dl().
imgname = '{:03}.jpg'
# here I need a ssh tunnel to access the website; you may not need this
#proxies = {'ssh':'127.0.0.1:7070'}

proxies = {}


"""
html = urllib.urlopen(sys.argv[1], proxies=proxies)

soup = BS(html)
elements = soup.findAll(itemprop = "url")
tags = []
tags = [element.text for element in elements]

"""
# Module-level session state, initialised by prepare():
opener = None        # urllib2 opener with cookie handling + browser headers
has_login = False    # set once login() has succeeded
cookie_str = ""      # "BAIDUID=...;H_PS_PSSID=..." sent with each request
cj = None            # cookielib.CookieJar backing `opener`
header = None        # list of (header, value) pairs for the opener
accel = None         # external downloader name ("axel") from the -a option

def prepare(username, password):
    """Initialise the module-level cookie-aware opener and, when a username
    is given, log in to baidu and store the session cookies in `cookie_str`.

    username -- baidu account name; "" means skip login entirely
    password -- baidu account password

    Retries login() every 5 seconds until it succeeds (no retry limit).
    """
    global has_login
    global cookie_str
    global opener
    global cj
    global header

    """
    agents = ['Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Version/3.1 Safari/525.13',
              'Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
              'Mozilla/5.0 (X11; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0',
              'Opera/9.27 (Windows NT 5.2; U; zh-cn)'
              ]
    agent = random.choice(agents)
    info_msg("Random pick agent: '%s'"% agent)
    """
    # Headers that make the opener look like a desktop Firefox browser.
    header = [('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'),

('Accept', 'text/html;q=0.9,*/*;q=0.8'),
('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.3'),
('Connection','Keep-Alive'),
('Referer', 'http://www.baidu.com')
]

    # Build the shared opener only once per process.
    if opener == None:
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        # pretend to be a browser
        opener.addheaders = header

    # Anonymous mode: downloads proceed without a login cookie.
    if username == "":
        return

    info_msg("Login baidu ...")
    ret = None

    # login() returns [BAIDUID, H_PS_PSSID] on success, None on failure.
    while not has_login:
        ret = login(username, password)
        if ret != None:
            has_login = True
        else:
            info_msg("Retry ...")
            time.sleep(5)

    cookie_str = "BAIDUID=%s;H_PS_PSSID=%s"% (ret[0], ret[1])
    print cookie_str

def get_html(url):
    """Fetch `url` through the module-level opener, sending the stored
    login cookie, and return the response body decoded as UTF-8."""
    request = urllib2.Request(url)
    request.add_header('Cookie', cookie_str)

    response = opener.open(request)
    raw = response.read()
    response.close()
    return raw.decode('UTF-8')

def get_html2(url):
    """Offline stand-in for get_html(): ignores `url` and returns the
    contents of a local "music.html" snapshot (debugging helper)."""
    return get_string_from_file("music.html")

def get_cookie_value(cookie_name, cookieJar):
    """Return the value of the first cookie in `cookieJar` whose name is
    `cookie_name`, or "" when no such cookie exists."""
    matches = (c.value for c in cookieJar if c.name == cookie_name)
    return next(matches, "")

def checkAllCookiesExist(cookieNameList, cookieJar):
    """Return True iff every name in `cookieNameList` is present in
    `cookieJar` with a non-empty value."""
    present = set()
    for cookie in cookieJar:
        if cookie.name in cookieNameList and cookie.value != "":
            present.add(cookie.name)
    return all(name in present for name in cookieNameList)

def login(username, password):
    """Emulate a browser login to baidu via the module-level `opener`.

    Three steps: hit the main page to receive a BAIDUID cookie, fetch a
    login token from the passport getapi endpoint, then POST the
    credentials + token to the login form.  Returns
    [BAIDUID value, H_PS_PSSID value] on success, None on any failure.
    """
    #print "[preparation] using cookieJar & HTTPCookieProcessor to automatically handle cookies";


    print "[step1] to get cookie BAIDUID";
    baiduMainUrl = "http://www.baidu.com/";
    resp = opener.open(baiduMainUrl);
    #respInfo = resp.info();
    #print "respInfo=",respInfo;
    """
    for index, cookie in enumerate(cj):
        print '[',index, ']',cookie;
    """
 
    print "[step2] to get token value";
    getapiUrl = "https://passport.baidu.com/v2/api/?getapi&class=login&tpl=mn&tangram=true";
    getapiResp = opener.open(getapiUrl);
    #print "getapiResp=",getapiResp;
    getapiRespHtml = getapiResp.read();
    #print "getapiRespHtml=",getapiRespHtml;
    # The token is embedded in a JS assignment like:
    #bdPass.api.params.login_token='5ab690978812b0e7fbbe1bfc267b90b3';
    foundTokenVal = re.search("bdPass\.api\.params\.login_token='(?P<tokenVal>\w+)';", getapiRespHtml);
    if(foundTokenVal):
        tokenVal = foundTokenVal.group("tokenVal");
        #print "tokenVal=",tokenVal;
 

        print "[step3] emulate login baidu";
        staticpage = "http://www.baidu.com/cache/user/html/jump.html";
        #baiduMainLoginUrl = "https://passport.baidu.com/v2/?login&fr=old"
        baiduMainLoginUrl = "http://www.baidu.com/cache/user/html/login-1.2.html"
        postDict = {
            #'codestring'    : "",
            'token'         : tokenVal, #de3dbf1e8596642fa2ddf2921cd6257f
            'username'      : username, 
            'password'      : password,
            #'verifycode'    : "",
        };
        postData = urllib.urlencode(postDict);
        # here will automatically encode values of parameters
        # such as:
        # encode http://www.baidu.com/cache/user/html/jump.html into http%3A%2F%2Fwww.baidu.com%2Fcache%2Fuser%2Fhtml%2Fjump.html
        #print "postData=",postData;
        req = urllib2.Request(baiduMainLoginUrl, postData);
        # in most cases, for a POST request, the content-type is application/x-www-form-urlencoded
        req.add_header('Content-Type', "application/x-www-form-urlencoded");
        resp = opener.open(req);
        """
        for index, cookie in enumerate(cj):
            print '[',index, ']',cookie;
        """
        # Success is judged by the presence of both session cookies in `cj`.
        cookiesToCheck = ['H_PS_PSSID', 'BAIDUID'];
        loginBaiduOK = checkAllCookiesExist(cookiesToCheck, cj);
        if(loginBaiduOK):
            print "+++ Emulate login baidu is OK, ^_^";
            return [get_cookie_value('BAIDUID', cj), get_cookie_value('H_PS_PSSID', cj)]
        else:
            print "--- Failed to emulate login baidu !"
            return None
    else:
        print "Fail to extract token value from html=",getapiRespHtml

    return None



def get_musics(url):
    """Scrape the listing page at `url` and return [authors, songs], where
    `songs` is a list of (relative song url, title) tuples from re_music and
    `authors` is the list of artist strings from re_author."""
    page = get_html(url)
    songs = re_music.findall(page)
    authors = re_author.findall(page)
    return [authors, songs]
    
def get_tags(html):
    """Extract tag strings from `html`; returns the list of matches, or the
    literal string "no-tag" when nothing matched.

    NOTE(review): `re_tags` is not defined anywhere in this file, so calling
    this raises NameError — it appears to be dead code (no callers here).
    The mixed return type (list on match, str otherwise) is also suspect.
    """
    #html = get_html(url)
    #html = get_string_from_file("music.html")
    tags = re_tags.findall(html)
    if tags != []:
        return tags
    return "no-tag"

def save(url, path):
    """Stream `url` into the local file `path`, printing a byte-count
    progress line to stdout.

    Fixes over the original:
    - break out of the read loop on EOF, so a response that delivers fewer
      bytes than its Content-Length header no longer spins forever on
      empty reads;
    - close the output file and the connection even when a read/write fails.
    """
    conn = urllib2.urlopen(url)
    try:
        total_len = conn.info().getheader('Content-Length')
        f = open(path, 'wb')
        try:
            readn = 0
            while readn < int(total_len):
                data = conn.read(4096)
                if not data:
                    # server closed the connection early
                    break
                f.write(data)
                readn = readn + len(data)
                sys.stdout.write("[ %s/%s ]\r"% (readn, total_len))
                sys.stdout.flush()
        finally:
            f.close()
    finally:
        conn.close()
 
    
    
def get_loc(url):
    """Scan the download page at `url` and return the http:// link with the
    highest numeric rate, or None when no usable link matched."""
    best_link = None
    best_rate = 0
    for link, rate in re_url.findall(get_html(url)):
        if "http://" in link and int(rate) > best_rate:
            best_link = link
            best_rate = int(rate)
    return best_link
 
def get_pages(html):
    """Get number of pages.

    NOTE(review): `re_pages` is not defined anywhere in this file, so calling
    this raises NameError — it appears to be dead code (no callers here).
    """
    #html = get_html(url)
    match = re_pages.search(html)
    if not match:
        raise ParsingError(html)
    return int(match.group(1))

def info_msg(msg):
    print "INFO: " + msg

def warn_msg(msg):
    print "WARN: " + msg

def compress(dir):
    # TODO: unimplemented stub — currently a no-op; no callers in this file.
    pass

def dl(url, title, author, force_write=False, base_dir="~/"):
    """Download one song as an mp3 into `base_dir`.

    url         -- the song's /download page url (fed to get_loc())
    title       -- song title, used to build the output filename
    author      -- artist name; "" omits the "-author" filename part
    force_write -- when False, skip songs whose target file already exists
    base_dir    -- output directory, '~' is expanded

    Fixes over the original: `is None` instead of `== None`, and the
    axel command line is shell-quoted — title/author come from scraped
    HTML, so unquoted os.system() was a command-injection risk.
    """
    info_msg("Starting downloading music: \"%s\""% title)

    outdir = (os.path.expanduser(base_dir))
    # strip characters that are awkward in directory names
    folder = re.sub('[~#$?–]', '', outdir)

    if author != "":
        out_file = folder + '/' + title + "-" + author + '.mp3'
    else:
        out_file = folder + '/' + title + '.mp3'

    if not force_write and os.access(out_file, os.R_OK):
        info_msg("Already downloaded, skip.\n")
        return

    loc = get_loc(url)
    if loc is None:
        warn_msg("Can not find the url!\n")
        return
    # get_loc() only returns links containing "http://", so index() is safe;
    # slicing from `i` drops any redirector prefix before the real url.
    i = loc.index("http://")

    if not os.path.exists(folder):
        info_msg("Directory '%s' not exist, create it!"% folder)
        os.makedirs(folder)

    info_msg('Downloading ... ')
    if accel == "axel":
        info_msg("Download use accelerator: %s"% accel)
        # quote both arguments before handing them to the shell
        import pipes
        cmd = "axel -a -n 5 -o %s %s"% (pipes.quote(out_file), pipes.quote(loc[i:]))
        os.system(cmd)
    else:
        save(loc[i:], out_file)

    info_msg("Downloading completed!\n")


class ParsingError(Exception):
    """Raised by get_pages() when the page-count pattern is not found;
    carries the offending HTML as its argument."""
    pass

 
def main():
    """Parse command-line options, log in (optionally), scrape a song
    listing page and download each song via dl()."""

    # NOTE(review): `title` and `author` are declared global but never
    # assigned or read in this function — presumably leftovers.
    global title
    global author
    global cookie_str
    global accel
    

    p = OptionParser()
    #p.add_option('-s','--skip', dest='skip_tags', help='skip download music have this tags(comma separated, e.g: Tag1,Tag2)', default='')
    #p.add_option('-a','--auto-archive', dest='auto_archive', action='store_true', help='automatically archiving the music after downloading',default=False)

    p.add_option('-o', '--output-dir', dest="out_dir", default='~/', help='out put directory of the music or archive')

    p.add_option('-f', '--force-write', dest="force_write", action='store_true', default=False, help='force write the existing folder(file)')
    p.add_option('-u', '--url', dest="target_url",  default='', help='download from specified url')
    p.add_option('-U', '--username', dest="username",  default='', help='username of baidu')
    p.add_option('-P', '--password', dest="password",  default='', help='password of baidu')
    p.add_option('-C', '--cookie', dest="cookie",  default='', help='login cookie for baidu music')
    p.add_option('-l', '--limit', dest="limit",  default='infinite', help='limit max number to download')
    p.add_option('-a', '--accel', dest="accel",  default=None, help='use user specified downloader accelerator')

    options, args = p.parse_args()


    #sk_tags = options.skip_tags.split(',')
    #auto_archive = options.auto_archive
    out_dir = options.out_dir
    force_write = options.force_write
    target_url = options.target_url
    user = options.username
    passwd = options.password
    # -C overrides the cookie; prepare() replaces it again after a -U login.
    cookie_str = options.cookie
    max_down = options.limit
    accel = options.accel

    print "##################################################"
    #print "Skip tags = ", sk_tags
    #print "Auto-archive = ", auto_archive
    print "Force-write = ", force_write
    print "Output dir = ", out_dir
    print "##################################################"
    
    info_msg("Prepare  ...")
    prepare(user, passwd)

    # Default listing page: baidu's "new songs" chart.
    base = "http://music.baidu.com/top/new"
    if target_url != "":
        base = target_url

    info_msg("Starting download Music from %s"%base)
    authors, musics = get_musics(base)
    num = len(musics)
    info_msg("Total musics: %d"% num)
    if max_down != "infinite" and (int(max_down) < num):
        num = int(max_down)
        info_msg("Limit to max %d pieces!"% num)

    i = 0
    for music in musics:
        if i >= num:
            break
        # music is (relative url, title) from re_music; build the download
        # page url with an "__o" referer-style query parameter derived from
        # the url's first two path components.
        temp = music[0].split('/')
        url = "http://music.baidu.com" + music[0] + "/download" + "?__o=%%2F%s%%2F%s"%(temp[0], temp[1])
        #print "url = ", url
        #urls = get_loc(url)
        #print music[1]," = ", urls

        info_msg("[ %d / %d]"%(i + 1, num))
        if authors != []:
            dl(url, music[1], authors[i], force_write, out_dir)
        else:
            dl(url, music[1], "", force_write, out_dir)
        i = i + 1;

    info_msg("All work done! Enjoy! ;-)")

 
if __name__ == "__main__":
    # Python 2 hack: reload(sys) restores the setdefaultencoding attribute
    # (deleted by site.py) so utf-8 can be made the default str<->unicode
    # codec, letting scraped titles be concatenated into filenames without
    # explicit encoding.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    main()

