#coding:utf-8

# Standard library
import os
import re
import socket
import sys
import threading
import urllib
import urllib.request

# Third-party
import requests
from bs4 import BeautifulSoup

# Target page to scrape.
response = requests.get(
    url='http://www.psttt.com/'
)

# Site root; prepended to the relative hrefs found below.
index = 'http://www.psttt.com'

# Episode titles collected from the album page; used later as file names.
mp3_name_list = []

# Set the encoding from the detected (apparent) encoding before parsing.
response.encoding = response.apparent_encoding
# Parse the landing page.
soup = BeautifulSoup(response.text, features='html.parser')

# NOTE(review): everything below depends on the live site's markup
# (class names 'pingshu_5tps_i' / 'bfdz' and the fixed indexes [1] and [4]);
# it will raise IndexError/AttributeError if the site layout changes.
toolbox = soup.find_all(class_='pingshu_5tps_i')
a_list = toolbox[1].find_all('a')

# Follow the fifth link of the second toolbox to an album page.
a_response = requests.get(index + a_list[4].attrs.get('href'))
a_response.encoding = a_response.apparent_encoding
a_html_soup = BeautifulSoup(a_response.text, features='html.parser')

# Playlist entries (<li>) on the album page.
li_list = a_html_soup.find(class_="bfdz").find_all('li')

# All episode anchors; the 'title' attribute carries the episode name.
aaa = a_html_soup.find(class_="bfdz").find_all('a')

for a in aaa:
    name = a.attrs.get("title")
    mp3_name_list.append(name)

# Open the first episode page only to discover the media URL pattern.
content_html = li_list[0].find('a').attrs.get('href')

content_response = requests.get(index + content_html)
content_response.encoding = content_response.apparent_encoding
content_soup = BeautifulSoup(content_response.text, features='html.parser')

# The player lives in an <iframe id="play">; fetch its document.
src = content_soup.find(id="play").attrs.get("src")

iframe = requests.get(index + src)
iframe.encoding = iframe.apparent_encoding

html = iframe.text

# ===================================  regex extraction ===================================

# Grab the player "setMedia" options object from the inline script
# (spaces are stripped first so the pattern need not handle whitespace).
# NOTE(review): the patterns below should be raw strings (r"...") to avoid
# invalid-escape warnings on '\s'/'\S' in newer Pythons.
str2 = re.compile("\"setMedia\",{([\s\S]*?)}").findall(html.replace(' ', ''))[0]

# Names of the JS variables spliced into the mp3 URL (text between '+' signs).
urlList = re.compile("\+([\s\S]*?)\+").findall(str2)

# Resolve the first such variable to its string value elsewhere in the script.
sprUrl = re.compile(urlList[0] + " = '([\s\S]*?)'").findall(html)

# The literal fragment of the mp3 URL inside the setMedia options.
content_url = re.compile("mp3:'([\s\S]*?)'").findall(str2)[0]

# Reassemble the full URL depending on how the JS concatenated it.
if content_url.endswith("+'"):
    content_url = sprUrl[1]
elif not content_url.endswith(".mp3"):
    content_url = content_url + sprUrl[0]

# Drop a trailing query marker.  NOTE(review): endswith("?") only matches a
# URL that literally ends in '?'; a mid-URL query string is left untouched.
if content_url.endswith("?"):
    content_url = content_url.split("?")[0]

# Base URL = everything before the final path component (the file name).
mp3_url = content_url.split(content_url.split("/")[-1])[0]

print (mp3_url)


# ===================================  regex extraction ===================================

def download(url, filename):
    """Download *url* to the local path *filename* (best effort).

    Prints a notice when the failure was a socket timeout; any other
    socket error is silently swallowed, preserving the original
    best-effort semantics.
    """
    errno = None
    try:
        # Fix: `urllib.urlretrieve` was removed in Python 3 — the function
        # lives in `urllib.request` now.
        urllib.request.urlretrieve(url, filename)
    except socket.error:
        # Remember which exception class occurred (original approach).
        errno = sys.exc_info()[0]
    if errno == socket.timeout:
        print("There was a timeout")


class DownloadThread(threading.Thread):
    """Worker thread that fetches a single URL to a local file."""

    def __init__(self, url, savePath):
        """Remember the source URL and destination path for run()."""
        super().__init__()
        self.url = url
        self.savePath = savePath

    def run(self):
        """Perform the blocking download inside this thread."""
        download(self.url, self.savePath)


def get_mp3(baseUrl, loadUrl_list, start_num, save_dir="C:/mp3/"):
    """Download every episode in ``loadUrl_list[start_num:]`` using threads.

    Runs at most 10 downloads concurrently, joining each batch (120 s
    per thread) before starting the next one.

    Fixes over the original:
      * removes ``str.decode('utf-8')`` calls that raise AttributeError
        on Python 3 strings;
      * corrects the start offset (the old loop compared an absolute
        ``index`` against the length of the *sliced* list, so any
        ``start_num > 0`` walked the wrong range);
      * removes the duplicated nested loop in the tail branch that ran
        ``threadMax`` iterations past the end of the list and only ever
        joined the last thread.

    :param baseUrl: URL prefix; each episode URL is ``baseUrl + name``.
    :param loadUrl_list: episode file names (also used as local file names).
    :param start_num: index of the first episode to download.
    :param save_dir: local directory prefix for saved files
        (default ``"C:/mp3/"``, matching the original hard-coded path).
    """
    thread_max = 10
    names = loadUrl_list[start_num:]

    # Process in batches of thread_max so at most 10 downloads run at once.
    for batch_start in range(0, len(names), thread_max):
        threads = []
        for name in names[batch_start:batch_start + thread_max]:
            url = baseUrl + name
            print(url)
            worker = DownloadThread(url, save_dir + name)
            worker.start()
            threads.append(worker)
        # Wait (up to 120 s each) for the whole batch before the next one.
        for worker in threads:
            worker.join(120)

    print ('结束下载')


# Kick off the downloads for every episode title scraped above.
get_mp3(mp3_url, mp3_name_list, 0)
