import time
from queue import Empty, Queue
from threading import Thread
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

"""
线程类
"""


class GetHtml(Thread):
    """Downloader thread for square-dance (广场舞) listing pages.

    Drains URLs from ``html_queue``; for every page that answers with
    HTTP 200, the GBK-decoded HTML text is put onto ``data_queue`` for
    the parser threads to consume.
    """

    def __init__(self, html_queue, data_queue):
        Thread.__init__(self)
        self.html_queue = html_queue  # Queue[str]: page URLs still to fetch
        self.data_queue = data_queue  # Queue[str]: raw HTML awaiting parsing

    def run(self):
        # Browser-like headers; presumably the site rejects the default
        # requests User-Agent — TODO confirm against the live server.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
            'Cookie': 'UM_distinctid=1763d2c952525d-0cd4dce4f1adf7-5a472316-144000-1763d2c95264e5; CNZZDATA6291=cnzz_eid%3D2113852903-1607340375-null%26ntime%3D1607340375; Hm_lvt_9574d394f32beb1fd4c7243f3661853c=1607344101; u7CmAz9lcheckregkey=1607344840%2C0f04d610501dd2ace9665ae4e5687109%2C1fbcd02b7cf88e52d11578452387e5c1; Hm_lpvt_9574d394f32beb1fd4c7243f3661853c=1607344841'
        }
        while True:
            # get_nowait()/Empty closes the empty()-then-get() race: with
            # several workers, empty() can report False and the queue be
            # drained before our get(), blocking this thread forever.
            try:
                url = self.html_queue.get_nowait()
            except Empty:
                break
            try:
                # Timeout so one unreachable page cannot hang the worker.
                response = requests.get(url, headers=self.headers, timeout=10)
            except requests.RequestException:
                continue  # skip failed pages instead of killing the thread
            response.encoding = 'gbk'  # site serves GBK-encoded Chinese text
            if response.status_code == 200:
                self.data_queue.put(response.text)

"""
解析类
"""


class GetData(Thread):
    """Parser thread.

    Drains raw HTML pages from ``data_html`` and, for each video item
    found, prints its description, play count, thumbnail URL and
    detail-page URL.
    """

    def __init__(self, data_html):
        Thread.__init__(self)
        self.data_html = data_html  # Queue[str]: HTML documents to parse

    def run(self):
        """Parse queued pages until the queue is drained."""
        while True:
            # get_nowait()/Empty closes the empty()-then-get() race window
            # that exists when several parser threads share one queue.
            try:
                html = self.data_html.get_nowait()
            except Empty:
                break
            soup = BeautifulSoup(html, 'lxml')
            for d in soup.select('div.item-unit.fx-tv'):
                # Catch only the lookup errors a missing/odd element can
                # raise (select_one -> None, empty select, missing attr).
                # The original bare ``except: pass`` around the whole loop
                # hid every bug and dropped the rest of the page on the
                # first malformed item; now we just skip that one item.
                try:
                    title = d.select_one('p.item-dp').text
                    num = d.select_one('i.mark-update').text.replace('播放：', ' ').strip()
                    img_url = d.select('div.pic.fs-seeds-panel img')[0]['src']
                    url = d.select('div.pic.fs-seeds-panel a')[0]['href']
                except (AttributeError, IndexError, KeyError, TypeError):
                    continue
                print('视频描述：{} 播放数量：{}  图片链接：{}'.format(title, num, 'http://v.chinadance.cn'+img_url))
                print('详情页：{}'.format('http://v.chinadance.cn'+url))


def main():
    """Crawl 50 listing pages with 5 downloader threads, then parse the
    collected HTML with 5 parser threads, and report the elapsed time."""
    started = time.time()
    html_queue = Queue()
    data_queue = Queue()

    # Seed the work queue with listing pages 1..50.
    page_template = 'http://v.chinadance.cn/guangchangwu/jiaoxue/index_{}.html'
    for page in range(1, 51):
        html_queue.put(page_template.format(page))

    # Stage 1: download every page before any parsing begins.
    downloaders = [GetHtml(html_queue, data_queue) for _ in range(5)]
    for worker in downloaders:
        worker.start()
    for worker in downloaders:
        worker.join()

    # Stage 2: parse all of the fetched HTML.
    parsers = [GetData(data_queue) for _ in range(5)]
    for worker in parsers:
        worker.start()
    for worker in parsers:
        worker.join()

    print('多线程爬取消耗时间；', time.time() - started)


if __name__ == '__main__':
    main()