#! /usr/bin/env python 
import sys
import os
import re
import urllib
import urlparse
from exceptions import *
import logging
import optparse

from BeautifulSoup import BeautifulSoup

import pdb

from modules.utils import *

"""
  NOTE:
    1. output strings in utf-8 encoding works much better than other encodings
    2. urllib.urlopen accept url string in encodings except unicode(utf-16, utf-8)
    3. to make url ready to pass to urlopen, we need to encode it first, urllib.urlencode
       only encode params, not url path, it will make trouble for urls that have non-ascii
       characters inside. in this case, use asciifyString function below; urllib.urlencode does
       accept unicode encodings
"""

def build_search_result_url(name, artist, type='mp3'):
    base_url = 'http://mp3.baidu.com/m'
    word = ''
    if name:
        word = name
    if artist:
        word += '+' + artist

    try:
       encoding = guess_encoding(word)
    except:
        print "I can not guess the encoding of your input"
        raise

    if encoding!='gbk':
        gb_word = convert_string_encoding(word, encoding, 'gbk')
    else:
        gb_word = word

    params_dict = {
              'f':'ms', # not know yet
              'tn':'baidump3',
              'ct':'134217728',
              'lf':'', # not know yet
              'rn':'', # not know yet
              'word':gb_word,
              'lm':'-1', #-1 (lyric or all type music); 0: mp3; 1:rm; 2:wma;6:flash
             }
    params = urllib.urlencode(params_dict)
    if params:
        url = base_url + '?' + params
    else:
        url = base_url
    return url

def download_list(start_index, end_index, dl, song_page, dl_song_page,
                    dl_lyric_page, options):

    bd = BDMp3()
    if options.top500:
        song_links = bd.get_top500_links()
    elif options.new200:
        song_links = bd.get_new200_links()
    else:
        return

    counter = start_index+1
    for link in song_links[start_index:end_index]:
        print "Start to download #%d "%counter
        if download_song(link, dl, song_page, dl_song_page, dl_lyric_page,
                         options):
            pass
        else:
            print "Failed to download #%d"%counter
        sys.stdout.flush()
        counter += 1

def download_song(url, dl, song_page, dl_song_page, dl_lyric_page, options):
    song_page.reset()
    result, reason = song_page.open(url)
    selected_songs = song_page.select_song(type=options.format)
    if result and selected_songs:
        song_base_name = selected_songs[0].name
        dl_page_url = selected_songs[0].url

        if options.song:
            song_url = dl_song_page.get_download_link(dl_page_url)
            if song_url:
                song_name = song_base_name + '.' + song_url[-3:].lower()
                #print "Dowload song %s" % song_name
                dl.download(song_name, song_url)

        if options.lyric:
            lyric_url = dl_lyric_page.get_download_link(selected_songs[0].lyric_url)
            if lyric_url:
                lyric_name = song_base_name + '.' + lyric_url[-3:].lower()
                #print "Dowload lyric %s" % lyric_name
                dl.download(lyric_name, lyric_url)
        return True
    else:
        print "Failed to download song for reason(%s)"%reason
        return False

class SongPage:
    """Wraps one Baidu MP3 search-result page.

    Typical use: reset(), open(url), then select_song() to filter the
    parsed rows down to downloadable entries.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # Drop state left over from a previously parsed page.
        self.header = []          # column header strings (currently unused)
        self.song_info_list = []  # one SongInfo per result row

    def open(self, url):
        """Fetch and parse the search-result page at *url*.

        Returns (True, '') on success, or (False, reason) when the page
        has only a header row.  Raises WebPageLayoutChange when the page
        does not have the expected single <table class="list"> layout.
        """
        soup = get_soup(url)
        tables = soup.findAll('table', {'class':'list'})

        if len(tables)!=1:
            # GOTCHA: 
            #  maybe the given url is not right
            raise WebPageLayoutChange()

        table = tables[0]
        trs = table.findAll('tr')
        if len(trs)==0:
            raise WebPageLayoutChange()
        elif len(trs)==1:
            # Only the header row is present -> empty result set.
            return False, 'No song found.'

        #self.header = self._build_header(trs[0])
        song_info_entries = trs[1:]  # skip the header row
        for song_info in song_info_entries:
            self.song_info_list.append(self._build_song_info(song_info))
        return True, ''

    def select_song(self, type='mp3', minsize=1.2, minspeed=9):
        """Filter the parsed rows by format, size and speed rating.

        type     -- file extension to keep, or '*' for any format
        minsize  -- minimum size (units as displayed on the page,
                    presumably MB -- TODO confirm)
        minspeed -- minimum speed rating (digit parsed from the speed
                    icon filename in _build_song_info)
        Rows missing a song url or a lyric url are always dropped.
        """
        result = [song_info for song_info in self.song_info_list
                  if (type=='*' or song_info.type.lower()==type)
                  and song_info.size>minsize
                  and song_info.speed>=minspeed
                  and song_info.url!=''
                  and song_info.lyric_url!='']
        return result

    def _build_header(self, header_tr):
        # Currently unused; see the commented-out call in open().
        if not header_tr:
            return []
        header = []
        items = header_tr.findAll('td')
        for i in items:
            if getattr(i, 'b', None):
                header.append(i.string)
            else:
                header.append('No.')
        return header

    def _build_song_info(self, song_info_html):
        """Build a SongInfo from one <tr> of the result table.

        Fields that cannot be parsed are left at their SongInfo defaults
        rather than raising.
        """
        song_info = SongInfo()
        if not song_info_html:
            return song_info

        items = song_info_html.findAll('td')
        song_info.number = int(items[0].string)

        try:
            # spaces would be awkward in the file names built later
            song_info.name = items[1].a.font.string.replace(' ', '_')
        except:
            pass

        try:
            song_info.artist = items[2].a.font.string
        except:
            pass

        song_info.url = encode_url(items[1].a['href'])

        # NOTE(review): in BeautifulSoup, hasattr(tag, 'href') looks for a
        # child *tag* named href, not the attribute -- confirm this guard
        # actually prevents a KeyError on ['href'] below.
        if hasattr(items[5], 'a') and hasattr(items[5].a, 'href'):
            song_info.lyric_url = encode_url(items[5].a['href'])

        try:
            song_info.size = float(items[7].string.split()[0])
        except:
            #ignore invalid format of size
            pass

        if items[8].string:
            song_info.type = items[8].string.lower()

        if hasattr(items[9], 'img') and hasattr(items[9].img, 'src'):
            # the speed rating is the digit just before ".gif" in the icon name
            song_info.speed = int(items[9].img['src'][-5])

        return song_info

class SongDownloadPage:
    """Resolves an intermediate download page into the actual song url."""

    def get_download_link(self, dl_page_url):
        """Return the first link found on the page, or None."""
        if not dl_page_url:
            return None
        anchors = get_soup(dl_page_url).findAll('a')
        if not anchors:
            return None
        first = anchors[0]
        if not hasattr(first, 'href'):
            return None
        return encode_url(first['href'])

class LyricDownloadPage:
    """Resolves a lyric download page into the url of an actual .lrc file."""

    # Link texts that identify a real lyric link: the first is the label
    # as correctly decoded unicode, the second is the same label seen as
    # raw GBK bytes.
    _LYRIC_LABELS = (u'LRC\u6b4c\u8bcd', u'LRC\xb8\xe8\xb4\xca')

    def get_download_link(self, dl_page_url):
        """Return the url of the first valid .lrc link, or None.

        Returns None when dl_page_url is empty or when no anchor ending
        in .lrc carries one of the known lyric labels.
        """
        if not dl_page_url:
            return None
        soup = get_soup(dl_page_url)
        # raw string so the regex escape is explicit
        links = soup.findAll('a', href=re.compile(r'\.lrc$'))
        valid_lyric_links = [link for link in links
                             if link.string in self._LYRIC_LABELS]
        # currently, we only return the first one
        if valid_lyric_links:
            return encode_url(valid_lyric_links[0]['href'])
        return None

class BDMp3:
    """Accessor for Baidu's chart pages (top-500 and newest-200)."""

    def __init__(self):
        self.top500_url = encode_url(u'http://list.mp3.baidu.com/topso/mp3topsong.html?id=1')
        self.new200_url = encode_url(u'http://list.mp3.baidu.com/list/newhits.html?id=1')

    def get_top500_links(self):
        """Return the song-page links listed on the top-500 chart."""
        return self._get_links(self.top500_url)

    def get_new200_links(self):
        """Return the song-page links listed on the newest-200 chart."""
        return self._get_links(self.new200_url)

    def _get_links(self, url):
        # Each chart entry sits in a <td class="border"> that holds one <a>.
        soup = get_soup(url)
        cells = soup.findAll('td', {'class':'border'})
        return [encode_url(cell.a['href'])
                for cell in cells
                if hasattr(cell, 'a') and cell.a]

if __name__ == '__main__':
    # --- command-line interface ---------------------------------------
    opt_parser = optparse.OptionParser(usage="%prog [OPTION]...",
                                       version="%prog1.0")
    opt_parser.add_option('-n', '--name', dest='name',
                          help='song\'s name to search')
    opt_parser.add_option('-a', '--artist', dest='artist',
                          help='artist\'s name to search')
    opt_parser.add_option('-f', '--format', dest='format',
                          default='*',
                          help='download specified format song')
    opt_parser.add_option('-l', '--nolyric', dest='lyric',
                          action="store_false", default=True,
                          help='not download lyric, default download')
    opt_parser.add_option('-s', '--nosong', dest='song',
                          action="store_false", default=True,
                          help='not download song, default download')
    opt_parser.add_option('-5', '--top500', dest='top500',
                          action="store_true", default=False,
                          help='download songs from top500 list')
    opt_parser.add_option('-2', '--new200', dest='new200',
                          action="store_true", default=False,
                          help='download songs from new200 list')
    opt_parser.add_option('-t', '--dry-run', dest='dry_run',
                          action="store_true", default=False,
                          help='do not actually download files')
    options, args = opt_parser.parse_args()

    # --- collaborators shared by both modes ---------------------------
    downloader = Downloader(output_indent=' '*4, dry_run_mode=options.dry_run)
    search_page = SongPage()
    song_dl_page = SongDownloadPage()
    lyric_dl_page = LyricDownloadPage()

    # chart mode: the two positional args select the slice to fetch
    if options.top500 or options.new200:
        if len(args)!=2:
            error("start_index and end_index not given!")
        download_list(int(args[0]), int(args[1]), downloader,
                      search_page, song_dl_page, lyric_dl_page,
                      options)

    # search mode: look a song up by name and/or artist
    if options.artist or options.name:
        url = build_search_result_url(name=options.name, artist=options.artist)
        download_song(url, downloader, search_page,
                      song_dl_page, lyric_dl_page,
                      options)


