# coding:utf-8
from ProgrameObj import ProgrameObj
from bs4 import BeautifulSoup
import threading
import urllib2
import os

# Upper bound on the number of concurrent download threads accepted by main().
max_thread = 20
# Module-wide reentrant lock; serializes console output and the shared
# file object's seek+write pairs across Downloader threads.
lock = threading.RLock()


class Downloader(threading.Thread):
    def __init__(self, url, start_size, end_size, fobj, buffer):
        self.url = url
        self.buffer = buffer
        self.start_size = start_size
        self.end_size = end_size
        self.fobj = fobj
        threading.Thread.__init__(self)

    def run(self):
        with lock:
            print 'starting: %s' % self.getName()
        self._download()

    def _download(self):
        try:
            req = urllib2.Request(self.url)
            req.headers['Range'] = 'bytes=%s-%s' % (self.start_size, self.end_size)
            f = urllib2.urlopen(self.url, timeout=60)
            offset = self.start_size
            while 1:
                block = f.read(self.buffer)
                if not block:
                    break
                # 使用 with lock 替代传统的 lock.acquire().....lock.release()
                # 需要python >= 2.5
                with lock:
                    self.fobj.seek(offset)
                    self.fobj.write(block)
                    offset += len(block)
        except:
            print "出错啦!"


def main(url="http://newoss.maiziedu.com/qiniu/csshtml1.m4v", thread=3, save_file='', buffer=1024):
    # 最大线程数量不能超过max_thread
    thread = thread if thread <= max_thread else max_thread
    # 获取文件的大小
    req = urllib2.urlopen(url)
    size = int(req.info().getheaders('Content-Length')[0])
    # 初始化文件对象
    fobj = open(save_file, 'wb')
    # 根据线程数量计算 每个线程负责的http Range 大小
    avg_size, pad_size = divmod(size, thread)
    plist = []
    for i in xrange(thread):
        start_size = i * avg_size
        end_size = start_size + avg_size - 1
        if i == thread - 1:
            # 最后一个线程加上pad_size
            end_size = end_size + pad_size + 1
        t = Downloader(url, start_size, end_size, fobj, buffer)
        plist.append(t)
    # 开始搬砖
    for t in plist:
        t.start()
    # 等待所有线程结束
    for t in plist:
        t.join()
    # 结束当然记得关闭文件对象
    fobj.close()
    print 'Download completed!'

# Parse the locally saved page to scrape (a downloaded copy of the course
# index page).  NOTE(review): the file handle from open() is never closed;
# harmless for a short script but worth confirming.
soup = BeautifulSoup(open("index.html"),"html.parser")

# Print the page's <title> text as a sanity check.
print "title:", soup.title.string


# Extract the "stage" (top-level course section) headings.
def find_big_title():
    """Collect one ProgrameObj per stage found in the page sidebar.

    Reads the module-level *soup*.  For every <li> inside a
    div.zy_rightBoxDD, records the stage number (the 'go' attribute with
    its 2-character prefix stripped) and the stage title (the <li>'s <p>
    text).  Returns the list of populated ProgrameObj instances.
    """
    programe_list = []
    for item in soup.find_all("div", class_="zy_rightBoxDD"):
        # Iterate the already-parsed <li> tags directly instead of
        # re-serializing the fragment and re-parsing it with a second
        # BeautifulSoup pass (same nodes, no extra parse).
        for li_title in item.find_all('li'):
            programe = ProgrameObj()
            programe.programe_num = li_title['go'][2:]
            programe.programe_title = li_title.p.text
            programe_list.append(programe)
    return programe_list

# Collect the chapter titles that belong to each stage.
def find_small_title(programe_list):
    """Attach chapter titles and absolute links to each stage object.

    Reads the module-level *soup*.  For every div.artc-bt that contains an
    <a>, matches the link's trailing '=<number>' against each stage's
    programe_num and, on a match, appends the chapter title and the full
    maiziedu.com URL to that stage.  Returns *programe_list* (mutated in
    place).
    """
    for item in soup.find_all("div", class_="artc-bt"):
        # (Removed dead code: the original re-parsed each item into an
        # unused 'item_soup' -- a wasted full parse per div.)
        if item.a:
            code_title = item.a['title']
            # href ends in '...=<number>'; that number keys the stage.
            number = item.a['href'].split('=')[1]
            for programe_obj in programe_list:
                if programe_obj.programe_num == number:
                    programe_obj.in_programe_titles.append(code_title)
                    programe_obj.in_programe_hrefs.append("http://www.maiziedu.com"+item.a['href'])
    return programe_list

# Create one directory per stage and one per chapter, then crawl each
# chapter page for its video URLs.
programe_list = find_small_title(find_big_title())
for programe_obj in programe_list:
    print programe_obj.programe_title
    # Only process stages whose directory does not exist yet -- existing
    # stage directories are skipped entirely (no resume of partial runs).
    if not os.path.exists(programe_obj.programe_title):
        os.mkdir(programe_obj.programe_title)
        os.chdir(programe_obj.programe_title)
        # titles and hrefs are parallel lists populated by find_small_title.
        for i in range(len(programe_obj.in_programe_titles)):
            in_title = programe_obj.in_programe_titles[i]
            href = programe_obj.in_programe_hrefs[i]
            # NOTE(review): chdir only happens when the chapter directory is
            # freshly created, but the chdir('..') at the bottom of the inner
            # loop runs regardless -- on a re-run with existing chapter dirs
            # the directory balance looks wrong; confirm intended behavior.
            if not os.path.exists(in_title):
                os.mkdir(in_title)
                os.chdir(in_title)
            print "  ", in_title
            # Save the raw chapter page as <index>.html in the chapter dir.
            response = urllib2.urlopen(href)
            file_handle = open("%s.html"%i, "wb")
            content = response.read()
            file_handle.write(content)
            file_handle.close()

            content_soup = BeautifulSoup(content, "html.parser")
            # Each ul.video_part_lists holds links to individual lesson pages.
            for li_content in content_soup.find_all("ul",class_="video_part_lists"):
                li_soup = BeautifulSoup(unicode(li_content), "html.parser")
                for li_href in li_soup.find_all('a'):
                    url = "http://www.maiziedu.com"+li_href['href']
                    last_response = urllib2.urlopen(url)
                    last_soup = BeautifulSoup(last_response.read(),"html.parser")
                    # The second <script type="text/javascript"> on the lesson
                    # page assigns the video URL; take the right-hand side of
                    # the last '=' and strip the surrounding quote characters.
                    movie_url = unicode(last_soup.find_all("script", type="text/javascript")[1].text).split('=')[-1].strip()
                    print movie_url[1:-1]

                    # Single-call variant: download one video with multiple threads.
                    # main(movie_url[1:-1],10,movie_url.split('/')[-1][:-1],4096)

                    # Threaded variant: download several videos concurrently.
                    # t = threading.Thread(target=main, args=(movie_url[1:-1],10,movie_url.split('/')[-1][:-1],4096))
                    # t.start()
                # Ascend out of the chapter directory.
                # NOTE(review): this runs once per matching <ul>; if a page
                # contains more than one ul.video_part_lists the script
                # ascends too far -- confirm pages always have exactly one.
                os.chdir(os.path.dirname(os.getcwd()))
        # Ascend out of the stage directory.
        os.chdir(os.path.dirname(os.getcwd()))
