#!/usr/bin/python
# -*- coding: utf-8 -*-
from HTMLParser import HTMLParser
import os, urllib2


#log = open('log.log', 'w+')
def DetectCharset(s):
	"""Decode the byte string *s* by trying a list of likely charsets.

	Returns a unicode string decoded with the first charset that
	accepts *s*, or *s* unchanged if every attempt fails.

	Note: iso-8859-1 maps every possible byte, so it can never fail;
	it must therefore be the LAST candidate.  In the original order it
	appeared in the middle of the list, which made utf-16/utf-8/utf-32
	and ascii unreachable.
	"""
	charsets = ('gbk', 'gb18030', 'gb2312', 'utf-16', 'utf-8',
	            'utf-32', 'ascii', 'iso-8859-1')
	for charset in charsets:
		try:
			return s.decode(charset)
		except (UnicodeError, LookupError):
			# Diagnostic only; fall through to the next candidate.
			print("try %s failed!" % charset)
	return s
	
class MySearchParser(HTMLParser):
    """Scrape a mp3.sogou.com lyric-search result page.

    The page lists each hit as marker text ("曲名：" title, "歌手："
    artist, "专辑：" album) followed by an "LRC歌词下载" download link.
    A small state machine collects the title/artist text seen between
    the markers and records the href of the enclosing <a> tag when the
    download-link marker is reached.

    After feed(), results are available in the parallel lists
    lyricUrls, lyricTitles and lyricArtists.
    """

    def __init__(self):
        # HTMLParser is an old-style class in Python 2; call its
        # __init__ directly instead of via super().
        HTMLParser.__init__(self)
        # These were previously *class* attributes, so every instance
        # shared (and accumulated into) the same lists -- a second
        # parser would start with the first parser's results.
        self.tags = []        # stack of currently-open tag names
        self.title = ''
        self.artist = ''
        self.state = 0        # 0 = idle, 1 = expect title, 2 = expect artist
        self.lyricUrls = []
        self.lyricTitles = []
        self.lyricArtists = []
        self.url = ""         # href of the most recently opened <a>

    def handle_starttag(self, tag, attrs):
        self.tags.insert(0, tag)
        if tag == 'a':
            for name, value in attrs:
                if name == 'href':
                    self.url = value
                    break

    def handle_data(self, data):
        # Marker strings drive the state machine.
        if self.state == 0 and data == "曲名：":
            self.state = 1
        elif self.state == 1 and data == "歌手：":
            self.state = 2
        elif self.state == 2 and data == "专辑：":
            self.state = 0
        elif data == "LRC歌词下载":
            # End of one result entry: record it and reset.
            self.lyricTitles.append(self.title)
            self.lyricArtists.append(self.artist)
            self.lyricUrls.append(self.url)
            self.state = 0
            self.title = ''
            self.artist = ''

        if not self.tags:
            return
        # Title/artist text appears inside <font> or <a> elements; the
        # last chunk seen before the next marker wins.
        if self.tags[0] in ("font", "a"):
            if self.state == 1:
                self.title = data
            elif self.state == 2:
                self.artist = data

    def handle_endtag(self, tag):
        # Guard against stray end tags: the original indexed
        # self.tags[0] unconditionally and raised IndexError on
        # malformed HTML with an unmatched closing tag.
        if self.tags and self.tags[0] == tag:
            self.tags.pop(0)

    def save(self, filename):
        """Write the collected results to *filename* as XML."""
        f = open(filename, 'w+')
        try:
            f.write(self.dumpXML())
        finally:
            # Always release the handle, even if a write fails
            # (the original leaked it on error).
            f.close()

    def dumpXML(self):
        """Return the collected results as an XML string."""
        parts = ['<?xml version="1.0" encoding="UTF-8" ?>\n', '<result>\n']
        for url, artist, title in zip(self.lyricUrls, self.lyricArtists,
                                      self.lyricTitles):
            parts.append('\t<lrc id="%s" artist="%s" title="%s"></lrc>\n'
                         % (url, artist, title))
        parts.append('</result>\n')
        return ''.join(parts)

    def dump(self):
        """Return the collected results as plain key=value text."""
        parts = []
        for url, artist, title in zip(self.lyricUrls, self.lyricArtists,
                                      self.lyricTitles):
            parts.append('artist=%s\ntitle=%s\nid=%s\n'
                         % (artist, title, url))
        return ''.join(parts)
        
def SearchLyric(artist, title):
	"""Search mp3.sogou.com for lyrics matching artist/title.

	*artist* and *title* are utf-8 byte strings; *artist* may be empty
	to search by title only.  Returns the matches as the plain-text
	output of MySearchParser.dump(), or '' on any failure.
	"""
	try:
		# The site expects GBK-encoded query parameters.
		if len(artist) > 0:
			theurl = 'http://mp3.sogou.com/gecisearch.so?query=%s-%s' % (
				artist.decode('utf8').encode('GBK'),
				title.decode('utf8').encode('GBK'))
		else:
			theurl = 'http://mp3.sogou.com/gecisearch.so?query=%s' % (
				title.decode('utf8').encode('GBK'))
		response = urllib2.urlopen(theurl)
		parser = MySearchParser()
		# Re-encode the page to utf-8 so the parser's comparisons with
		# this file's utf-8 marker literals match.
		html = DetectCharset(response.read()).encode('utf8')
		parser.feed(html)
		return parser.dump()
	except Exception:
		# Narrowed from a bare except, which also swallowed
		# KeyboardInterrupt/SystemExit; the best-effort "return empty
		# string on failure" contract is preserved.
		return ""

def DownLoadLyric(id, artist, title):
	"""Fetch one lyric file from mp3.sogou.com.

	*id* is the URL path recorded by MySearchParser (the <a> href of
	the "LRC歌词下载" link).  *artist* and *title* are accepted for
	interface compatibility but are not used here.  Returns the lyric
	text re-encoded as utf-8, or '' on any failure.
	"""
	try:
		theurl = 'http://mp3.sogou.com/%s' % id
		response = urllib2.urlopen(theurl)
		return DetectCharset(response.read()).encode('utf8')
	except Exception:
		# Narrowed from a bare except (which also trapped
		# KeyboardInterrupt/SystemExit).
		return ""

if __name__ == '__main__':
	# Manual smoke test: search for the lyrics of a known song and
	# print whatever the site returns.
	print(SearchLyric('羽泉', '奔跑'))