#!/usr/bin/python
# -*- coding: utf-8 -*-

import os
import urllib2
from HTMLParser import HTMLParser
#log = open("log.log", "w+")
def DetectCharset(s):
	"""Decode the byte string *s* by trying a list of likely charsets.

	Intended for raw HTTP response bodies (Chinese pages are typically
	GBK-family encoded).  Returns the decoded unicode text on the first
	charset that succeeds, or *s* unchanged if every attempt fails.

	NOTE(review): 'iso-8859-1' accepts any byte sequence, so for bytes
	input one of the first four charsets always succeeds and the final
	fallback return is effectively unreachable; the order is kept as-is
	to preserve existing behavior.
	"""
	charsets = ('gbk', 'gb18030', 'gb2312', 'iso-8859-1', 'utf-16', 'utf-8', 'utf-32', 'ascii')
	for charset in charsets:
		try:
			# s.decode(charset) is equivalent to unicode(s, charset) in
			# Python 2 but does not rely on the py2-only builtin.
			return s.decode(charset)
		except (UnicodeError, LookupError):
			# Was a bare except, which also swallowed KeyboardInterrupt
			# and SystemExit; only decoding failures are expected here.
			print("try %s failed!" % charset)
	return s
	
class MySearchParser(HTMLParser):
	"""Parse a Baidu search-result page and collect LRC lyric links.

	Scans the document for the literal marker text "【LRC】"; for the
	next <a> element it records the href plus the anchor text, which is
	split around the first '-' into title ("before") and artist
	("after").  Results accumulate in the parallel lists lyricTitles /
	lyricArtists / lyricUrls.
	"""

	def __init__(self):
		HTMLParser.__init__(self)
		# BUG FIX: these used to be *class* attributes, so every parser
		# instance shared -- and kept appending to -- the same three
		# lists.  They are now per-instance state.
		self.lyricArtists = []
		self.lyricTitles = []
		self.lyricUrls = []
		# Parser state machine:
		#   0 = scanning for the "【LRC】" marker
		#   1 = marker seen, waiting for the next <a>
		#   2 = inside that <a>, collecting its text
		self.state = 0
		self.SongInfo = ""     # kept for interface compat; never used here
		self.isBefore = True   # still reading the title part (before '-')
		self.before = ""       # accumulated title text
		self.after = ""        # accumulated artist text
		self.url = ""          # href of the current <a>

	def handle_starttag(self, tag, attrs):
		# An <a> following the marker starts a new title/artist capture.
		if self.state == 1 and tag == 'a':
			self.state = 2
			self.before = ""
			self.after = ""
			self.isBefore = True
			for k, v in attrs:
				if k == "href":
					self.url = v
					break

	def handle_data(self, data):
		if self.state == 0 and data == "【LRC】":
			self.state = 1
		if self.state == 2:
			if self.isBefore:
				if data.find('-') >= 0:
					# First chunk containing '-' splits "title - artist".
					self.isBefore = False
					info = data.split('-')
					self.before = self.before + info[0].strip(' ')
					if len(info) > 1:
						self.after = info[1].strip(' ')
					else:
						self.after = ''
				else:
					self.before = self.before + data
			else:
				# Later text chunks (e.g. around nested tags) extend the
				# artist part, space-separated.
				self.after = self.after + ' ' + data

	def handle_endtag(self, tag):
		# Closing </a> finalizes one result and resumes the marker scan.
		if self.state == 2 and tag == 'a':
			self.lyricTitles.append(self.before.strip(' '))
			self.lyricArtists.append(self.after.strip(' '))
			self.lyricUrls.append(self.url)
			self.state = 0

	def dumpInfo(self, title):
		"""Print every collected entry whose title contains *title*."""
		for index, t in enumerate(self.lyricTitles):
			if t.find(title) >= 0:
				print("作者：%s\n歌名：%s\nURL：%s\n" % (self.lyricArtists[index], t, self.lyricUrls[index]))

	def save(self, filename):
		"""Write the collected results to *filename* as a small XML file."""
		# 'with' guarantees the handle is closed even if a write fails
		# (the old version leaked it on error).
		with open(filename, 'w+') as f:
			f.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
			f.write('<result>\n')
			for url, artist, title in zip(self.lyricUrls, self.lyricArtists, self.lyricTitles):
				f.write('\t<lrc id="%s" artist="%s" title="%s"></lrc>\n' % (url, artist, title))
			f.write('</result>\n')

	def dumpXML(self):
		"""Return the collected results serialized as an XML string."""
		# NOTE(review): attribute values are not XML-escaped; a title
		# containing '"' or '&' would produce malformed XML.
		xml = '<?xml version="1.0" encoding="UTF-8" ?>\n'
		xml += '<result>\n'
		for url, artist, title in zip(self.lyricUrls, self.lyricArtists, self.lyricTitles):
			xml += '\t<lrc id="%s" artist="%s" title="%s"></lrc>\n' % (url, artist, title)
		xml += '</result>\n'
		return xml

	def dump(self):
		"""Return the results as plain "key=value" text, one field per line."""
		txt = ''
		for url, artist, title in zip(self.lyricUrls, self.lyricArtists, self.lyricTitles):
			txt += 'artist=%s\ntitle=%s\nid=%s\n' % (artist, title, url)
		return txt

def SearchLyric(artist, title):
	"""Search baidu.com for LRC lyric files matching *title* (and *artist*).

	Both arguments are UTF-8 encoded byte strings; Baidu's query string
	expects GBK, hence the decode/encode round-trip.  Returns the
	plain-text dump produced by MySearchParser.dump(), or "" on any
	failure (network error, bad encoding, parse error).
	"""
	try:
		# Build the GBK-encoded "title[-artist]" query term.
		query = title.decode('utf8').encode('GBK')
		if len(artist) > 0:
			query = query + "-" + artist.decode('utf8').encode('GBK')
		URL = "http://www.baidu.com/s?wd=" + query + "+filetype%3Alrc&cl=3"
		response = urllib2.urlopen(URL)
		try:
			html = DetectCharset(response.read()).encode('utf8')
		finally:
			# urllib2 responses are not context managers; close explicitly
			# (the old version leaked the connection).
			response.close()
		sp = MySearchParser()
		sp.feed(html)
		return sp.dump()
	except Exception:
		# Deliberate best-effort boundary: any failure yields "".  (Was a
		# bare except, which also swallowed KeyboardInterrupt/SystemExit.)
		return ""


def DownLoadLyric(ID, artist, title):
	"""Fetch the lyric file at URL *ID* and return its raw body.

	*artist* and *title* are currently unused but kept so the signature
	stays compatible with existing callers.  Returns "" on any download
	failure.
	"""
	try:
		response = urllib2.urlopen(ID)
		try:
			return response.read()
		finally:
			# Close the connection even if read() fails (the old version
			# leaked it).
			response.close()
	except Exception:
		# Best-effort boundary; was a bare except.
		return ""

if __name__ == '__main__':
	# Smoke test: search for "奔跑" by 羽泉 and print whatever we found.
	result = SearchLyric("羽泉", "奔跑")
	print(result)
	#log.close()