import xml.parsers.expat,re,unicode2ascii, pickle

# Parser state for the Pandora favorites feed (see start_element/char_data):
# glabels accumulates one dict per <item>; the remaining g* flags track
# which XML element the expat parser is currently inside.
glabels=[]
gitem=0
gtrack=0
gartist=0
galbum=0
gtitle=0
giteml={}

from htmlentitydefs import name2codepoint

# Copy so the stdlib table is not mutated; 'apos' is valid in XML but
# missing from the HTML 4 entity table, so add it explicitly.
name2codepoint = name2codepoint.copy()
name2codepoint['apos']=ord("'")

# Matches decimal (&#NN;), hexadecimal (&#xNN;) and named (&name;) entities.
EntityPattern = re.compile('&(?:#(\d+)|(?:#x([\da-fA-F]+))|([a-zA-Z]+));')

def decodeEntities(s, encoding='utf-8'):
    """Decode byte string *s* with *encoding* and expand XML/HTML entities.

    Handles decimal (&#NN;), hexadecimal (&#xNN;) and named (&name;)
    entities; anything unrecognised is left in place unchanged.
    """
    def _expand(m):
        dec, hexa, named = m.group(1), m.group(2), m.group(3)
        if dec:
            return unichr(int(dec, 10))
        if hexa:
            return unichr(int(hexa, 16))
        if named in name2codepoint:
            return unichr(name2codepoint[named])
        # Unknown named entity: keep the original text.
        return m.group(0)

    return EntityPattern.sub(_expand, s.decode(encoding, 'ignore'))

# Punctuation stripped outright from titles (URL/shell-unfriendly chars).
rep_emp = re.compile(r'\$|&|\+|,|;|=|@|"|\'|#|%|{|}|\^|\[|\]|`')
# Sequences replaced with a single space (ellipses and path separators).
rep_sp = re.compile(r'\.\.\.|\.\.|/|\\')
# Additional characters removed so titles are safe as directory names.
rep_dash = re.compile(r'~|\||:|<|>|\. |\?|!|\(|\)|--')

def cleanData(data):
    """Reduce *data* to a filesystem-safe ASCII title string.

    Decodes HTML/XML entities, transliterates non-ASCII characters,
    then strips or spaces out punctuation via the module-level regexes.
    """
    decoded = decodeEntities(data)
    # Project helper: approximate non-ASCII characters with ASCII.
    approx = unicode2ascii.unicode2ascii(decoded)
    approx = approx.replace('?', '-')
    # Whatever is still unrepresentable becomes '?' and is then dropped.
    title = approx.encode('ascii', 'replace').replace('?', '')
    title = rep_emp.sub('', title)
    title = rep_sp.sub(' ', title)
    title = rep_dash.sub('', title)
    return title

# Parser state for seeqpod search-result XML (see see_start_element and
# friends): gids collects one dict per <result_list_item>; gresult is the
# item currently being built; the flags mark which element we are inside.
gurl_id=0
gloc=0
gres=0
gids=[]
gresult={}

def see_start_element(name, attrs):
    """Expat StartElementHandler for seeqpod search-result XML: raise the
    flag for whichever tracked element just opened."""
    global gurl_id, gloc, gres
    if name == "mp3_url_id":
        gurl_id = 1
    elif name == "location":
        gloc = 1
    elif name == "result_list_item":
        gres = 1
    

def start_element(name, attrs):
    """Expat StartElementHandler for the Pandora favorites feed: raise the
    flag for whichever tracked element just opened."""
    global gitem, gtrack, gartist, galbum, gtitle
    if name == "dc:title":
        gtitle = 1
    elif name == "mm:Track":
        gtrack = 1
    elif name == "mm:Artist":
        gartist = 1
    elif name == "mm:Album":
        galbum = 1
    elif name == "item":
        gitem = 1

def see_end_element(name):
    """Expat EndElementHandler for seeqpod XML: clear flags, and on a
    closing <result_list_item> finalize the current result dict."""
    global gurl_id, gloc, gresult, gids, gres
    if name == "mp3_url_id":
        gurl_id = 0
    elif name == "location":
        gloc = 0
    elif name == "result_list_item":
        gres = 0
        # Keep only the file-name portion of the URL, then store the item.
        gresult['loc'] = gresult['loc'].split('/')[-1]
        gids.append(gresult)
        gresult = {}

def end_element(name):
    """Expat EndElementHandler for the favorites feed: clear flags, and on
    a closing <item> store the accumulated track dict into glabels."""
    global glabels, gitem, gtrack, gartist, galbum, gtitle, giteml
    if name == "dc:title":
        gtitle = 0
    elif name == "mm:Track":
        gtrack = 0
    elif name == "mm:Artist":
        gartist = 0
    elif name == "mm:Album":
        galbum = 0
    elif name == "item":
        gitem = 0
        glabels.append(giteml)
        giteml = {}

def see_char_data(data):
    """Expat CharacterDataHandler for seeqpod XML: append text to the 'id'
    or 'loc' field of the result item currently being parsed.

    Expat may deliver one element's text in several chunks, hence the
    concatenation instead of plain assignment.
    """
    global gres, gurl_id, gloc, gresult
    if not gres:
        return
    if gurl_id:
        gresult['id'] = gresult.get('id', '') + data
    if gloc:
        gresult['loc'] = gresult.get('loc', '') + data



def char_data(data):
    """Expat CharacterDataHandler for the favorites feed: append cleaned
    title text to whichever fields (track/artist/album) are flagged.

    Text only counts inside <item> while a <dc:title> is open; expat may
    split one title across several calls, hence the concatenation.
    """
    global glabels, gitem, gtrack, gartist, galbum, gtitle, giteml
    if not (gitem and gtitle):
        return
    for flag, key in ((gtrack, 'track'), (gartist, 'artist'), (galbum, 'album')):
        if flag:
            giteml[key] = giteml.get(key, '') + cleanData(data)
# Parser for the Pandora favorites feed (fills glabels); used by main().
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data

# Parser for seeqpod search-result XML (would fill gids).  NOTE(review):
# appears unused in this file -- seeqSearch() parses with regexes
# (reg_parse) instead.
q = xml.parsers.expat.ParserCreate()
q.StartElementHandler = see_start_element
q.EndElementHandler = see_end_element
q.CharacterDataHandler = see_char_data


import sys,getopt

def usage():
        print "Usage:\t"+sys.argv[0],'[-h] username'
        print "\t-h --help print this help message"

def parseopts(iargs):
    iopts="h"
    longopts=["help"]
    opts=[]
    args=[]
    try:
        opts, args = getopt.getopt(iargs,iopts,longopts)
    except getopt.GetoptError, err:
        print str(err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    if len(args)!=1:
        #print args
        print "Expected username, got: ", " ".join(args)
        usage()
        sys.exit(2)
    for o, a in opts:
        #print o,a
        if o in ("-h", "--help"):
            usage()
            sys.exit()
    return args

import urllib2,urllib,random

def getRand9():
    """Return a 9-character random string of ASCII letters and the digits
    1-9 (used as a cache-busting 'random=' query parameter)."""
    lower = 'abcdefghijklmnopqrstuvwxyz'
    pool = list(lower) + list(lower.upper()) + list('123456789')
    return ''.join(random.choice(pool) for _ in range(9))
	

def getSearch(entry):
    """Build three URL-quoted seeqpod queries for *entry*, ordered from
    most to least specific: track+artist+album, track+artist, track."""
    def quoted(*parts):
        # Collapse runs of whitespace, then quote everything (safe='').
        return urllib.quote(' '.join(' '.join(parts).split()), '')

    track, artist, album = entry['track'], entry['artist'], entry['album']
    return (quoted(track, artist, album),
            quoted(track, artist),
            quoted(track))

import time,os

def reg_parse(xml):
    """Extract download candidates from seeqpod result XML via regexes.

    Returns a list of dicts, one per result, with keys 'rloc' (full URL),
    'loc' (URL basename) and 'id' (mp3_url_id), in document order.
    """
    loc_pat = re.compile(r'\<location\>(.*?)\<\/location\>')
    id_pat = re.compile(r'\<mp3_url_id\>(.*?)\<\/mp3_url_id\>')
    locs = loc_pat.findall(xml)
    ids = id_pat.findall(xml)
    results = []
    for idx, loc in enumerate(locs):
        results.append({
            'rloc': loc,
            'loc': loc.split('/')[-1],
            'id': ids[idx],
        })
    return results

def getPath(song):
    path="./"
    path+='_'.join(song['artist'].split())+'/'
    path+='_'.join(song['album'].split())+'/'
    path+='_'.join(song['track'].split())+'/'
    if not os.path.exists(path):
        try:
            os.makedirs(path,int("0755",8))
        except(OSError):
            print 'os.makedirs('+path+') FAILED'
            #sys.exit(1)
    return path

def seeqSearch(query):
    nlids=[]
    rand=getRand9()
    search="http://www.seeqpod.com/api/seeq/search?rt=0&s=0&n=30&q="+query+"&rv=0&rm=1&rp=0&random="+rand
    data=""
    try:
        u=urllib2.urlopen(search)
        data=u.read()
        u.close()
    except urllib2.HTTPError:
	print 'HTTPError',query.replace('%20',' ')
    if not '<music>0</music>' in data:
        nlids=reg_parse(data)

    #print nlids
    return nlids



def getLids(nlids, lids, have):
    """Append candidates from *nlids* to *lids*, skipping any whose full
    URL is already in *have*, stopping once *lids* holds 5 entries.

    Mutates and returns *lids*.
    """
    for candidate in nlids:
        if len(lids) >= 5:
            break
        if candidate['rloc'] not in have:
            lids.append(candidate)
    return lids

def doSearch(a, b, c, have):
    """Collect up to five download candidates for one song.

    Tries the most specific query *a* first, then falls back to the
    broader *b* and *c*; returns as soon as five candidates are found.
    *have* lists URLs to exclude (already downloaded).
    """
    lids = []
    for query in (a, b, c):
        lids = getLids(seeqSearch(query), lids, have)
        if len(lids) > 4:
            return lids
    # Defensive cap; getLids already stops appending at 5.
    if len(lids) > 5:
        lids = lids[:5]
    return lids



def leeq(song):
    global gids
    a,b,c=getSearch(song)
    path=getPath(song)
    p=path[:-1]+'.done'
    if os.path.exists(p):
	f=open(p,'rb')
	d=f.read().strip()
	f.close()
	if d != 'done':
	    print "Don't need",a.replace('%20', ' ')
	    return
	elif len(os.listdir(path))>=5:
	    print "Don't need",a.replace('%20', ' ')
            return
    have=[]
    pickf=path[:-1]+'.have'
    if os.path.exists(pickf):
        pick=open(pickf,'r')
	try:
	    have=pickle.load()
	except:
	    have=[]
	pick.close()
	
    #print a
    #print b
    #print c
    #print rand

    lids=doSearch(a,b,c,have)

    if len(lids) == 0:
	print 'No results for',a.replace('%20', ' ')
	return

    tdpre=re.compile(r'365.*Days.*Project')

    for l in lids:
	ls=os.listdir(path)
	if l['loc'] in ls:
	    print 'Already have',l['loc'].replace('%20', ' ')
	    continue
	rand=getRand9()
        murl="http://www.seeqpod.com/api/youtube.com/results?mp3_url_id="+l['id']+"&random="+rand
        #print murl, l['loc']
	data=""
	try:
            u=urllib2.urlopen(murl)
            data=u.read().strip()
	except urllib2.HTTPError:
            print 'HTTPError',l['loc'].replace('%20', ' ')
	    continue

	if data == "not_available":
	    print 'File not available?', a.replace('%20', ' ')
	    continue
	#print data
        u.close()
        #print data, l['loc']


	if tdpre.search(l['loc']):
	    print 'Not download 365 Days Project file',l['loc'].replace('%20', ' ')
	    continue

        print "Downloading",l['loc'].replace('%20', ' ')
	file=None
	try:
            u=urllib2.urlopen(data)
            file=u.read()
            u.close()
	except urllib2.HTTPError:
            print 'HTTPError',l['loc'].replace('%20', ' ')
	    continue

        print "Saving",l['loc'].replace('%20',' ')
	outp=path+l['loc'].replace('%20','_')
	outp=outp.replace(' ','_')
        out=open(outp,'wb')
        out.write(file)
        out.close()
	have.append(l['rloc'])
        time.sleep(random.randrange(5,20))

    if len(os.listdir(path))>=5:
	f=open(p,'wb')
	f.write('done')
	f.close()
    elif len(os.listdir(path))<5 and os.path.exists(p):
        f=open(p,'rb')
        d=f.read().strip()
        f.close()
        if d == 'done':
	    os.remove(p)
    if have:
	pick=open(pickf, 'wb')
	pickle.dump(have,pick)
	pick.close()

def main():
    global glabels
    args=parseopts(sys.argv[1:])
    url = "http://feeds.pandora.com/feeds/people/"+args[0]+"/favorites.xml"
    data=""
    try:
        u=urllib2.urlopen(url)
        data=u.read()
        u.close()
    except urllib2.HTTPError:
        print 'HTTPError',url
	sys.exit(1)
    #f=open('favorites.xml','r')
    #data=f.read()
    #f.close()
    p.Parse(data)
    #for entry in glabels:
#	s1,s2,s3=getSearch(entry)
    for lab in glabels:
	#try:
        leeq(lab)
	#except:
	#    print 'Fuck'


# Script entry point.
if __name__ == "__main__":
    main()
