import random
import re
import threading
import time
import urllib
import urllib.request

import requests


# Candidate HTTP proxy endpoints; one is chosen at random for each request.
ip_s = [
    'http://118.212.137.135:31288',
    'http://116.213.98.6:8080',
    'http://114.215.95.188:3128',
]


def get_procie():
    """Return a requests-style ``proxies`` dict built from a random ip_s entry."""
    return {'http': random.choice(ip_s)}



def getheaders():
    """Return a headers dict carrying a randomly chosen desktop User-Agent.

    Bug fixed: the original list was missing the comma after the first
    entry, so the first two user-agent strings were implicitly
    concatenated into a single malformed value.  The backslash line
    continuations were also redundant inside the brackets and have been
    removed.
    """
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    ]
    return {'User-Agent': random.choice(user_agent_list)}


#urllib.request.urlretrieve('http://www.chenjinwei.com/test.txt','F:\\PythonDemo\\Crawl_Demo\\Crawl_Demo\\Test3\\m4d\\test.txt')
# Shared result list: play-path URLs collected by all scraping threads.
m4a_list = []
# Global progress counter (only referenced by commented-out code in go()).
name = 0
def go(page):
    """Scrape one album listing page and record each track's play path.

    For the given 1-based ``page`` number, fetch the album page through a
    random proxy, extract the track ids, then fetch each track's JSON
    metadata and append its 'play_path_64' URL to Test3/m4d/m4a.txt and
    to the shared ``m4a_list``.
    """
    try:
        print('开始爬取第 '+ str(page) +' 页')
        URL = 'http://www.ximalaya.com/4742669/album/4137349/?page=' + str(page)
        ip_1 = get_procie()
        print(ip_1)
        html = requests.get(URL, headers=getheaders(), proxies=ip_1, timeout=20)
        # Track ids appear in anchors shaped like href="/4742669/sound/<id>/".
        ids = re.findall('href="/4742669/sound/(.*?)/" hashlink title', html.text)
        datas = list(set(ids))  # de-duplicate ids via set()
        print(datas)
        print('====================================================================')
        print('====================================================================')
        print('====================================================================')

        for m in datas:
            time.sleep(2)  # throttle per-track requests
            url = 'http://www.ximalaya.com/tracks/'+ m +'.json'
            ip = get_procie()
            print(ip)
            try:
                html1 = requests.get(url, headers=getheaders(), proxies=ip, timeout=20)
                m4a = html1.json()['play_path_64']
                print(m4a)
                # 'with lock' releases the lock even if the write raises;
                # the original acquire()/release() pair leaked the lock on error.
                with lock:
                    with open('Test3/m4d/m4a.txt', 'a') as f:
                        f.write(m4a + '\n')
                m4a_list.append(m4a)
            except Exception as e:
                # Best-effort per track: report and move on to the next id.
                print(str(e))
                continue
    except Exception as ee:
        # Include the exception detail instead of silently discarding it.
        print('第'+str(page)+'出错: ' + str(ee))


lock = threading.RLock()    #创建 可重入锁 



def download(url, name):
    """Download one m4a track to the local m4d directory.

    url  -- direct URL of the .m4a file to fetch.
    name -- sequence number used to build the local file name.

    Bug fixed: the original body referenced the undefined globals ``m``
    and ``m4a`` (leftovers from the scraping loop) instead of the ``url``
    parameter, so every call raised NameError.
    """
    path = 'F:\\PythonDemo\\Crawl_Demo\\Crawl_Demo\\Test3\\m4d\\' + str(name) + '.m4a'
    try:
        # Built-in download helper; needs an explicit 'import urllib.request'
        # ('import urllib' alone does not expose the request submodule).
        urllib.request.urlretrieve(url, path)
    except IOError as e:  # URLError/OSError from network or filesystem failures
        print("第" + str(name) + "个m下载出错：", e)




def main():
    """Spawn one scraping thread per album page (1-5), wait for all of
    them to finish, then print every collected play path."""
    threads = []  # renamed from 'list' to stop shadowing the builtin
    for i in range(1, 6):
        t = threading.Thread(target=go, args=(i,))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    for u in m4a_list:
        print(u)
# Entry point; note the trailing print executes on import as well as
# when the file is run directly, because it sits outside the guard.
if __name__ == '__main__':
    main()

print('结束')