#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
本爬虫是用来爬取6V电影网站上的电影资源的一个小脚本程序，爬取到的电影链接会通过网页的形式显示出来
(This small script crawls movie resources from the 6V movie site; the scraped
movie links are written out as an HTML page.)
'''
import requests
import re
from bs4 import BeautifulSoup as bs
from queue import Queue
# from other import getUser_Agent
import threading
import sys
import time
from tqdm import tqdm

# HTTP headers sent with every request: close each connection after the
# response and present a desktop-Chrome User-Agent so the site serves
# normal pages to the scraper.
headers = {
    'Connection':'close',
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}

# Root URL of the 6v movie site; scraped relative links are joined onto it.
domain = 'http://www.6vhao.tv'
 
 
class Movielinks(threading.Thread):
    """Worker thread that drains a shared queue of movie detail-page URLs.

    For each URL it scrapes the page for the online-watch link and appends it
    as an HTML list item to the output file, firing a helper thread that
    prints crawl progress to stdout.
    """

    def __init__(self, que, filepath, totalcount, starttime):
        """Store the shared work state.

        que        -- Queue of detail-page URLs shared by all workers.
        filepath   -- HTML file the scraped links are appended to.
        totalcount -- initial queue size, used for the progress percentage.
        starttime  -- epoch seconds when the crawl started (elapsed-time base).
        """
        threading.Thread.__init__(self)
        self._que = que
        self._filepath = filepath
        self._totalcount = totalcount
        self._starttime = starttime

    def run(self):
        """Consume URLs from the queue until it is empty."""
        try:
            # NOTE(review): empty()/get() is not atomic, so with many workers
            # a get() could block after another thread drains the queue —
            # unchanged from the original design.
            while not self._que.empty():
                url = self._que.get()
                # Fire-and-forget progress refresh on a separate thread.
                threading.Thread(target=self.showdetail).start()
                self.spider(url)
        except Exception as e:
            print('error--->def run(self):', e)

    def spider(self, url):
        """Fetch one movie page and append its watch link to the output file.

        Pages are GBK-encoded; candidate links live in <td> cells. Cells
        mentioning '下载帮助' (download help) or lacking '在线观看' (watch
        online) are skipped.
        """
        title = None
        try:
            # Bug fix: headers was passed positionally, which requests.get()
            # interprets as query `params`; it must be the `headers` keyword
            # for the User-Agent to actually be sent.
            r = requests.get(url, headers=headers)
            # `with` guarantees the file is closed on every path (the
            # original leaked the handle on non-200 responses / exceptions).
            with open(self._filepath, 'a+') as file:
                if r.status_code == 200:
                    soup = bs(r.content.decode('gbk'), 'html.parser')
                    links = soup.find_all('td')
                    img = soup.find(id="text").find('img')['src']
                    # Movie title is the text between 《 》 in the page title.
                    title = re.search(r'《(.*?)》', soup.title.string)
                    regexp = re.compile('上映日期.*?')
                    date = str(soup.find(text=regexp))

                    for link in links:
                        # Skip the "download help" cell and any cell that is
                        # not an online-watch entry.
                        if '下载帮助' in str(link):
                            continue
                        if '在线观看' not in str(link):
                            continue
                        file.write('<li style="list-style-type: decimal;"><a href="%s" target="%s"><img alt="%s" src="%s" width="154" height="156">%s</a>%s</li>' % (link.find('a')['href'], '_blank', title.group(), img, title.group(), date))
                        file.write('\n<br>')
                        # (The original had an inner loop classifying ed2k/
                        # thunder/baidu links per <a>, but it started with
                        # `break` and was unreachable; removed as dead code.)
                    file.write('<br>')
                else:
                    print('%s 该磁力链接已坏！！' % url)
        except Exception as e:
            print('爬取失败：', e)
            # Record the failure in the output page when the title is known
            # (the original dereferenced `title`/`file` here even when they
            # were never assigned, crashing inside the handler).
            if title is not None:
                with open(self._filepath, 'a+') as file:
                    file.write('<b><font color="#ff0000">%s 无法正常爬取，尽情谅解</font></b>' % title.group())

    def showdetail(self):
        """Print one-line progress: percent of queue consumed and elapsed seconds."""
        usetime = time.time() - self._starttime
        per = 100 - (float(self._que.qsize()) / float(self._totalcount)) * 100
        sys.stdout.write('\r' + '下载链接进度：%.2f %s  用时：%.3f 秒' % (float(per), '%', float(usetime)))
 
 
def getMovieCount(searchid):
    """Print how many movies and result pages exist for ``searchid``.

    The site paginates results 20 per page; the total is parsed from the
    <div class="channellist"> heading text.
    """
    url = domain + '/e/search/result/?searchid=' + str(searchid)
    # Bug fix: headers must be a keyword argument — passed positionally,
    # requests.get() sends the dict as query parameters, not HTTP headers.
    r = requests.get(url, headers=headers)
    soup = bs(r.content, 'html.parser')
    divs = soup.find(name='div', attrs='channellist')
    # First number in the heading is the total movie count.
    pages = re.search(r'\d+', divs.h2.string)

    print('%s 一共有%2.f页' % (divs.h2.string, float(pages.group()) / 20))
 
 
 
def getWantPagesUrls(url,startpage,endpage,searchid):
    """Return the search-result page URLs for pages startpage..endpage (inclusive).

    The ``url`` argument is kept for backward compatibility but is not used;
    each link is rebuilt from the module-level ``domain``.
    """
    base = domain + '/e/search/result/index.php?page='
    return [base + str(page) + '&searchid=' + str(searchid)
            for page in range(startpage, endpage + 1)]
 
 
def getpagesLinks(urls):
    """Fetch each search-result page and return the absolute detail-page URL
    of every movie listed on it, in page order.

    (Removed the unused name dict/list the original built up, plus the dead
    commented-out code.)
    """
    moviehref_list = []

    for url in urls:
        # Bug fix: pass headers as a keyword — positionally requests.get()
        # treats the dict as query parameters.
        r = requests.get(url, headers=headers)
        soup = bs(r.content, 'html.parser')
        # Each movie sits in a <div class="listimg"> whose first <a> links to
        # its detail page (site-relative, so prefix the domain).
        for div in soup.find_all('div', class_='listimg'):
            moviehref_list.append(domain + div.find('a')['href'])

    return moviehref_list
 
 
def getqueue(urls):
    """Return a FIFO Queue preloaded with every URL in ``urls``."""
    loaded = Queue()
    for link in urls:
        loaded.put(link)
    return loaded
 
def main():
    """Interactive entry point: choose a category and a page range, then
    crawl every listed movie with one Movielinks worker thread per detail
    page, appending the results to a hard-coded HTML file."""
    # Menu shown to the user: option number ---> searchid used by the site.
    tishi = '''
           *6v电影网站电影链接爬虫*
       #要搜索的常用关键字代码#
           1.韩国--->185773
           2.日本--->185691
           3.国产--->186504
           4.美国--->187181
           5.英国--->188161
           6.香港--->188461
           7.喜剧--->185441
           8.恐怖--->187193
           9.悬疑--->190226
           10.记录片--->187963
           11.科幻--->189866
           12.战争--->187830
           13.动画--->187978
 
       '''
    print(tishi)

    # Same mapping as the menu text: option number -> site searchid.
    keywords = {1:185773,2:185691,3:186504,4:187181,5:188161,6:188461,7:185441,8:187193,9:190226,10:187963,11:189866,12:187830,13:187978}

    threads = []
    # NOTE(review): this base URL is passed to getWantPagesUrls(), which
    # ignores the argument and rebuilds the link itself.
    url = domain + '/e/search/result/index.php?page='

    searchid = input('请输入搜索关键字序号>>>')
    print('正在计算电影总量... ...')
    # Show how many result pages exist for the chosen category.
    getMovieCount(keywords[int(searchid)])
    startpage = int(input('请输入起始页码>>>'))
    endpage = int(input('请输入终止页码>>>'))
    # filepath = input('请输入电影链接要保存的文件路径（扩展名是：.html）')
    # NOTE(review): output path is hard-coded; the prompt above was disabled.
    filepath = '/Users/bruce/Downloads/test/1.html'

    print('The program is running,Please waiting... ...')

    # Detail-page URL of every movie on the requested result pages.
    urls = getpagesLinks(getWantPagesUrls(url,startpage,endpage,keywords[int(searchid)]))

    que = getqueue(urls)

    print('本次下载行为：从%d页到%d页，有%d部电影正在下载...'%(startpage,endpage,que.qsize()))

    # One worker thread per movie; all drain from the shared queue.
    thread_count = que.qsize()
    starttime = time.time()

    for i in range(thread_count):
        threads.append(Movielinks(que,filepath,thread_count,starttime))

    for t in threads:
        t.start()

    # Wait for every worker to finish before exiting.
    for t in threads:
        t.join()
 
 
 
if __name__ == '__main__':
    # An interactive repeat-until-negative menu existed here originally;
    # kept for reference:
    # ch = int(input('请输入选项（负数退出）>>>'))
    # while ch >0:
    #     main()
    #     ch = int(input('请输入选项（负数退出）>>>'))
    main()