#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------
#Created at: 2008-07-27 01:12:36 AM
#Created by: Fla.sam
#
#DESC: xiami music scanner utils
#
#-------------------------------------------------

from BeautifulSoup import BeautifulSoup
import cookielib
import re
import threading
import time
import urllib
import urllib2

# Matches xiami's custom "needs upload" link scheme (e.g. emumo://...);
# used to detect albums/songs that have no playable audio.
# Cleaned up: ':' and '/' need no escaping in a regex.
re_upload_protocol = re.compile( r'^emumo://\S*' )

class XiaMiBadRequestError( Exception ):
    '''Raised by XiaMiWebHandle.request() when no opener has been built.'''
    pass

class XiaMiBadSearchTypeError( Exception ):
    '''Raised when a search type is not one of 'title', 'album', 'artist'.'''
    pass

class XiaMiBadInstancesError( Exception ):
    '''Raised when XiaMiWebSearchHelper is given a non-XiaMiWebHandle handle.'''
    pass

class XiaMiIterError( Exception ):
    '''Raised by get_all_results() before every result page has been read.'''
    pass

class XiaMiWebHandle:
    '''
    Authenticated HTTP handle for the xiami.com music site.

    Keeps a cookie-backed urllib2 opener, can log in on construction, and
    can run a background "heartbeat" thread that re-logs-in whenever the
    session drops.
    '''

    def __init__( self, username, passwd, keep_online=False, auto_login=False, proxy=None ):
        '''
        XiaMi Web handle.

        #TODO: http proxy support

        @param username: username
        @param passwd: password
        @param keep_online: if True, start a thread that checks the session
                            every few minutes and re-logs-in when logged out
        @param auto_login: if True, log in when the instance is created,
                           otherwise doLogin() must be called manually
        @param proxy: proxy support, {proxy_host:proxy_port} (not implemented)
        '''
        self.username = username
        self.passwd = passwd
        self.keep_online = keep_online
        self.auto_login = auto_login

        self.USER_AGENT = 'XScannerWebHandle/0.1'
        self.my_user_agent = [( 'User-Agent', self.USER_AGENT ), ]
        self.LOGIN_URL = '''http://www.xiami.com/member/login'''
        self.LOGOUT_URL = '''http://www.xiami.com/member/logout'''

        self.CHECK_URL = '''http://www.xiami.com/member'''
        # page <title> shown only to a logged-in member ("My Xiami")
        self.CHECK_LOGINED_TITLE = u'我的虾米'

        self.HEARTBEAT_INTERVAL = 60 * 15

        # public search type -> url path segment
        self.SEARCH_TYPE = {'title':'index', 'album':'album', 'artist':'artist'}

        # set once doStopHeartbeat() is called; also used as an
        # interruptible timer inside the heartbeat loop
        self.__heartbeat_stop = threading.Event()
        self.__heartbeat_thread = None

        # cookie-aware opener so the login session persists across requests
        self.cj = cookielib.CookieJar()
        self.opener = urllib2.build_opener( urllib2.HTTPCookieProcessor( self.cj ) )

        # now add user-agent to request header
        self.opener.addheaders = self.my_user_agent

        # kept for backward compatibility: code elsewhere may rely on this
        # opener being installed globally in urllib2
        urllib2.install_opener( self.opener )

        if self.auto_login:
            # auto login to xiami
            self.doLogin()

        if self.keep_online:
            # keep the account online via the heartbeat thread
            self.doStartHeartbeat()

    def __heartbeat( self ):
        # Re-login whenever the session drops.  Event.wait() doubles as the
        # sleep so doStopHeartbeat() wakes the loop up immediately.
        while not self.__heartbeat_stop.is_set():
            if not self.isLogined():
                self.doLogin()
            self.__heartbeat_stop.wait( self.HEARTBEAT_INTERVAL )

    def doStartHeartbeat( self ):
        '''
        Start the heartbeat thread so the account stays logged in.
        '''
        self.__heartbeat_stop.clear()
        self.__heartbeat_thread = threading.Thread( target=self.__heartbeat )
        # daemon: never block interpreter exit on this background loop
        self.__heartbeat_thread.daemon = True
        self.__heartbeat_thread.start()

    def doStopHeartbeat( self ):
        '''
        Stop the heartbeat thread; once the cookie expires the user must
        log in again manually.
        '''
        # bug fix: this used to be a no-op ('pass'), leaving a non-daemon
        # thread spinning forever with no way to shut it down
        self.__heartbeat_stop.set()

    def request( self, url, **request_param ):
        '''
        POST request_param (form-encoded) to url and return the body.

        @param url: absolute url to request
        @param request_param: form fields sent as the POST body
        @return: response body as a string, or '' on any URLError
        @raise XiaMiBadRequestError: called before the opener was built
        '''
        if not hasattr( self, 'opener' ):
            raise XiaMiBadRequestError
        _request = urllib2.Request( url, urllib.urlencode( request_param ) )
        try:
            # use our opener directly instead of relying on the globally
            # installed one
            res = self.opener.open( _request )
        except urllib2.URLError:
            # best-effort: callers treat an empty body as "no data"
            return ''
        try:
            return res.read()
        finally:
            res.close()

    def doLogin( self ):
        '''
        Log in to the xiami site using the stored credentials.
        '''
        res = self.request( self.LOGIN_URL, \
                              log_login_name=self.username, \
                              log_passwd=self.passwd, \
                              done='/member', \
                              submit=11 )

    def doLogout( self ):
        '''
        Log out of the xiami site.
        '''
        res = self.request( self.LOGOUT_URL )

    def doClean( self ):
        '''
        Release the opener's resources.
        '''
        self.opener.close()

    def isLogined( self ):
        '''
        Return True if the current session is logged in, False otherwise.
        '''
        try:
            res = self.request( self.CHECK_URL )
        except XiaMiBadRequestError:
            return False
        html_operate = BeautifulSoup( res )
        # guard: a page without a <title> (e.g. the '' body returned after
        # a network error) counts as not logged in instead of raising
        # AttributeError
        if html_operate.title is None:
            return False
        return html_operate.title.string == self.CHECK_LOGINED_TITLE

    def doSearch( self, search_type, search_str ):
        '''
        Build a XiaMiWebSearchHelper for the given search.

        URL patterns used downstream:
            http://xiami.com/search/index/key/search_string
            http://xiami.com/search/album/key/search_string
            http://xiami.com/search/artist/key/search_string

        @param search_type: one of 'title', 'album', 'artist'
        @param search_str: raw search string
        @raise XiaMiBadSearchTypeError: unsupported search_type
        '''
        if search_type not in self.SEARCH_TYPE:
            raise XiaMiBadSearchTypeError( 'Not support search type' )
        return XiaMiWebSearchHelper( self, search_type, search_str )

    def doFriendListening( self ):
        '''
        Scrape the front-page block of songs my friends are listening to.

        @return: list of dicts with keys user, user_nick, time_ago,
                 title, title_number
        '''
        _url = '''http://www.xiami.com/'''
        _soup = BeautifulSoup( self.request( _url ) )

        _friendlistening_block = _soup.find( 'div', attrs={'class':'index_top_right'} )
        _friendlistening_lists = []
        for x in _friendlistening_block.findAll( 'ul' ):
            _friendlistening = {}
            for i, h in enumerate( x.findAll( 'li' ) ):
                if i == 0:
                    # first <li>: link to the listener's profile page
                    _friendlistening['user'] = dict( h.a.attrs )['href'].split( '/' )[2]
                    _friendlistening['user_nick'] = h.a.string
                elif i == 1:
                    _friendlistening['time_ago'] = h.string
                elif i == 2:
                    # third <li>: link to the song page
                    _friendlistening['title'] = h.a.string
                    _friendlistening['title_number'] = dict( h.a.attrs )['href'].split( '/' )[2]
            _friendlistening_lists.append( _friendlistening )
        return _friendlistening_lists
        
class XiaMiWebSearchHelper:
    '''
    Iterable helper over paged xiami search results.

    Created by XiaMiWebHandle.doSearch(); iterating yields one page of
    result dicts at a time and accumulates them for get_all_results().
    '''

    def __init__( self, handel, search_type, search_str ):
        '''
        xiami Web search helper.

        @param handel: XiaMiWebHandle instance used for HTTP requests
        @param search_type: one of 'title', 'album', 'artist'
        @param search_str: raw (unquoted) search string
        @raise XiaMiBadInstancesError: handel is not a XiaMiWebHandle
        @raise XiaMiBadSearchTypeError: unknown search_type
        '''
        self.xiami_web_handel = handel
        if not isinstance( self.xiami_web_handel, XiaMiWebHandle ):
            raise XiaMiBadInstancesError( 'Require XiaMiWebhandle Class instances' )

        self.pages = 1      # total result pages (updated by _do_search_info)
        self.index = 0      # 1-based number of pages fetched so far
        self.items = 0      # total result items (updated by _do_search_info)

        if search_type not in self.xiami_web_handel.SEARCH_TYPE:
            raise XiaMiBadSearchTypeError
        self.search_type = search_type

        self.search_str = urllib.quote( search_str )

        # initialised before the first request so a failing info request
        # cannot leave the attribute missing
        self.search_results = []

        self._do_search_info( self.search_type, self.search_str )

    def __str__( self ):
        return u'Search [ %s ] has [ %s ] , found [ %d ] item, [ %d ] pages.' % ( self.search_type, urllib.unquote( self.search_str ), self.items, self.pages )

    def _page_url( self, kind, search_str, page_number ):
        # Build a search url; the /page/N suffix is only valid past page 1.
        if page_number == None or page_number == 1:
            return '''http://www.xiami.com/search/%s/key/%s''' % ( kind, search_str )
        return '''http://www.xiami.com/search/%s/key/%s/page/%d''' % ( kind, search_str, page_number )

    def _do_search_info( self, search_type, search_str ):
        '''
        Fetch item and page counts for the search (internal).

        @param search_type: one of 'title', 'album', 'artist'
        @param search_str: url-quoted search string
        '''
        if search_type not in self.xiami_web_handel.SEARCH_TYPE:
            raise XiaMiBadSearchTypeError
        _url = self._page_url( self.xiami_web_handel.SEARCH_TYPE[search_type], search_str, None )

        res = self.xiami_web_handel.request( _url )
        _soup = BeautifulSoup( res )
        _html_find_count = _soup.find( 'li', attrs={'class':'search_end_bg_1'} )
        if _html_find_count is None:
            # empty/unexpected page (e.g. request() returned '' after a
            # network error): report zero results instead of crashing
            self.items = 0
            self.pages = 0
            return
        _st = _html_find_count.a.string
        # the hit count is rendered as "... (123)" inside the pager link
        self.items = int( _st[_st.find( '(' ) + 1:_st.find( ')' )] )
        # ceil(items / 20): 20 results per page.  Fixes the old
        # (items/20)+1, which added a phantom empty page for exact
        # multiples of 20 and claimed 1 page for 0 items (which in turn
        # made get_all_results() raise even after a full iteration).
        self.pages = ( self.items + 19 ) // 20

    def _do_search_index( self, search_str, page_number=None ):
        '''
        Fetch one page of title ("index") search results (internal).

        @param search_str: url-quoted search string
        @param page_number: 1-based page; None or 1 means the first page
        @return: list of song dicts, or None when the page has no song table
        '''
        _url = self._page_url( 'index', search_str, page_number )

        res = self.xiami_web_handel.request( _url )
        _soup = BeautifulSoup( res )

        _songs_soup = _soup.find( 'table', attrs={'class':'song'} )
        if _songs_soup is None:
            return None
        _tr = _songs_soup.findAll( 'tr' )

        _song_lists = []
        for row in _tr:
            _song_item_info = {}
            for i, cell in enumerate( row ):
                if i == 1:
                    # two <a> tags in the title cell mean the audio is
                    # missing (an upload link is shown next to the title)
                    _song_item_info['has_song'] = len( cell.findAll( 'a' ) ) != 2
                    _song_item_info['title'] = cell.a.string
                    _song_item_info['title_number'] = dict( cell.a.attrs )['href'].split( '/' )[2]
                elif i == 5:
                    _song_item_info['album'] = cell.a.string
                    _song_item_info['album_number'] = dict( cell.a.attrs )['href'].split( '/' )[2]
                elif i == 7:
                    _song_item_info['artist'] = cell.a.string
                    _song_item_info['artist_number'] = dict( cell.a.attrs )['href'].split( '/' )[2]
            _song_lists.append( _song_item_info )
        return _song_lists

    def _do_search_album( self, search_str, page_number=None ):
        '''
        Fetch one page of album search results (internal).

        @param search_str: url-quoted search string
        @param page_number: 1-based page; None or 1 means the first page
        @return: list of album dicts
        '''
        _url = self._page_url( 'album', search_str, page_number )

        res = self.xiami_web_handel.request( _url )
        _soup = BeautifulSoup( res )

        _album_lists = []
        for x in _soup.findAll( 'div', attrs={'class':'search_end_special'} ):
            _album_item_info = {}
            _album_item_info['album'] = x.h4.a.string
            _album_item_info['album_number'] = dict( x.h4.a.attrs )['href'].split( '/' )[2]
            _album_item_info['artist'] = x.table.a.string
            _album_item_info['artist_number'] = dict( x.table.a.attrs )['href'].split( '/' )[2]
            # an emumo:// upload link means the album has no playable song
            _album_item_info['has_song'] = x.div.find( 'a', attrs={'href':re_upload_protocol} ) == None
            _album_lists.append( _album_item_info )

        return _album_lists

    def _do_search_artist( self, search_str, page_number=None ):
        '''
        Fetch one page of artist search results (internal).

        @param search_str: url-quoted search string
        @param page_number: 1-based page; None or 1 means the first page
                            (default added for consistency with the other
                            _do_search_* helpers)
        @return: list of artist dicts
        '''
        _url = self._page_url( 'artist', search_str, page_number )

        res = self.xiami_web_handel.request( _url )
        _soup = BeautifulSoup( res )

        _artist_lists = []
        for x in _soup.findAll( 'div', attrs={'class':'search_end_siger'} ):
            _artist_item_info = {}
            _artist_item_info['artist'] = x.table.a.string
            _artist_item_info['artist_number'] = dict( x.table.a.attrs )['href'].split( '/' )[2]
            _artist_lists.append( _artist_item_info )
        return _artist_lists

    def get_result_items_count( self ):
        '''
        Return the total number of search result items.
        '''
        return self.items

    def get_result_page_count( self ):
        '''
        Return the total number of search result pages.
        '''
        return self.pages

    def get_all_results( self ):
        '''
        Return every accumulated result; the iterator must be exhausted first.

        @raise XiaMiIterError: not every page has been read yet
        '''
        if self.index != self.pages:
            raise XiaMiIterError( 'Must read all results page' )
        return self.search_results

    def __len__( self ):
        return self.pages

    def __iter__( self ):
        return self

    def next( self ):
        '''
        Fetch, accumulate and return the next page of results.

        @raise StopIteration: all pages read, or the search had no items
        '''
        if self.index == self.pages or self.items == 0:
            raise StopIteration
        self.index += 1

        # dispatch table avoids the unbound-variable branch the old
        # if/elif/else chain had
        _fetch = { 'title': self._do_search_index,
                   'album': self._do_search_album,
                   'artist': self._do_search_artist }[self.search_type]
        _current_page_results = _fetch( self.search_str, self.index )

        self.search_results += _current_page_results
        return _current_page_results

    # Python 3 iterator protocol alias (harmless on Python 2)
    __next__ = next
    
def main( username='flasam', passwd='s13c241122' ):
    '''
    Demo entry point: log in, run a title search, read every result page,
    then dump the friend-listening feed.

    SECURITY NOTE: real credentials were hard-coded here; they are kept
    only as backward-compatible defaults -- pass your own instead of
    committing secrets to source.

    @param username: xiami account name
    @param passwd: xiami account password
    '''
    xiami = XiaMiWebHandle( username, passwd, auto_login=True )
    if xiami.isLogined():
        s1 = xiami.doSearch( 'title', '认输 郭' )
        print( s1 )

        # consume every page so get_all_results() does not raise
        for x in s1:
            pass

        print( s1.get_all_results() )
    else:
        print( 'please login into xiami site' )

    print( xiami.doFriendListening() )
    

if __name__ == '__main__':
    # uncomment to profile a run:
    #import cProfile
    #cProfile.run('main()')
    main()