"""参考视频   https://www.youtube.com/watch?v=VkD2Wx14p-U&list=PLwDQt7s1o9J4DqlcIaDLJPnP902mZs0Js&index=5&t=6s
           https://www.youtube.com/watch?v=Z2QvqN_RX6M
url = 'https://aregsr.vip/forum-96-'
亚洲无码下载 3691页


"""
from pathlib import Path
import xlrd
import os
import xlwt
import random
import xlutils
import lxml
import pandas as pd
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import requests
from requests.cookies import RequestsCookieJar
import urllib3
import re
import threading


# Semaphore capping the number of concurrent downloader threads at 30.
thread_lock = threading.Semaphore(30)



import json


# --- HTTP session setup -----------------------------------------------------
# Build a requests session that carries the browser-exported login cookies so
# authenticated forum pages can be fetched.

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

s = requests.session()
s.verify = False  # the site presents an invalid certificate; skip verification
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}

# cookies.txt is a JSON list of {"name": ..., "value": ...} objects (e.g.
# exported from a browser). JSON is UTF-8 by spec, so read it with an explicit
# encoding instead of relying on the platform default.
jar = RequestsCookieJar()
with open("cookies.txt", "r", encoding="utf-8") as fp:
    for cookie in json.load(fp):
        jar.set(cookie['name'], cookie['value'])

# Forum section base URL; the page number is appended in the main loop.
url = 'https://aregsr.vip/forum-96-'


#翻页模块,并且获取不同文件夹的链接

def get_alltitle(url5, k, folder_path2):
    """Download the preview image and the .torrent file of one forum thread.

    Parameters:
        url5: relative thread URL (e.g. "thread-3585492-1-831.html") scraped
              from the section index page.
        k: index of the thread within the current page (kept for
           backward-compatible signature / diagnostics).
        folder_path2: per-page download root directory.

    Runs in a worker thread; always releases ``thread_lock`` on exit so the
    spawning loop can keep at most 30 workers alive.
    """
    print(url5)
    detail_url = 'https://aregsr.vip/' + url5
    try:
        r = s.get(detail_url, headers=headers, cookies=jar, timeout=3)
        r.encoding = "utf-8"
        html2 = r.text

        # Thread title comes from the copy-to-clipboard attribute, e.g.
        #   data-clipboard-text="<title>,https://..." >选择复制</button>
        # Keep only the title: strip quotes, the 20-char attribute prefix
        # 'data-clipboard-text=' and the trailing ',https'; replace '/'
        # (illegal in Windows paths) with a full-width comma.
        title_match = re.search(r'data-clipboard-text=(.*?)https', html2)
        raw_title = title_match.group().replace('"', '')
        file_name = raw_title[20:][:-6].replace('/', '，')

        # Torrent attachment link, e.g.
        #   forum.php?mod=attachment&amp;aid=MzEyNTAx...%3D"
        aid_match = re.search(r'aid=(.*?)"', html2)
        torrent_url = ('https://aregsr.vip/forum.php?mod=attachment&'
                       + aid_match.group()[:-1])

        # One sub-folder per thread under the per-page root.
        folder_path = os.path.join(folder_path2, file_name)
        os.makedirs(folder_path, exist_ok=True)

        # Preview images appear as file="http://.../xxx.jpg"; grab the first.
        img_urls = re.findall(r'file="(.*?)".*?>', html2)
        try:
            picture = s.get(img_urls[0], headers=headers, cookies=jar)
            with open(os.path.join(folder_path, '1.jpg'), 'wb') as f:
                f.write(picture.content)
        except Exception:
            print('no pic')

        # Torrent file.
        try:
            torrents = s.get(torrent_url, headers=headers, cookies=jar)
            with open(os.path.join(folder_path, '1.torrent'), 'wb') as f:
                f.write(torrents.content)
        except Exception:
            print('no torr')

    except Exception as e:
        # url5 is this thread's own URL (the caller passes url2[k] as url5),
        # so log it directly instead of reading the shared url2 list.
        with open('异常2.txt', 'a', encoding='utf-8') as fp:
            fp.write(str(e) + url5 + '\n')
    finally:
        # Release in a finally so even a non-Exception interrupt (e.g.
        # KeyboardInterrupt) cannot leak a semaphore slot and stall the loop.
        thread_lock.release()


# --- Paging loop: walk the section index and fan out worker threads ---------
for i in range(3690, 3691):
    try:
        page_url = "https://aregsr.vip/forum-96-{}.html".format(i)
        print(page_url)

        r = s.get(page_url, headers=headers, cookies=jar)
        r.encoding = "utf-8"
        html1 = r.text
        # Thread links look like: </a>]</em> <a href="thread-...-1-831.html"
        thread_links = re.findall(r'</a>]</em> <a href="(.*?)".*?', html1)

        # Persist the links to the tttt4.xls scratch file and read them back
        # with pandas — kept as-is because the .xls file is an observable
        # output of the original workflow.
        book2 = xlwt.Workbook(encoding="utf-8", style_compression=0)
        sheet = book2.add_sheet('test01', cell_overwrite_ok=False)

        # One download folder per index page.
        folder_path2 = "F:\\亚洲无码下载\\" + str(i)
        os.makedirs(folder_path2, exist_ok=True)

        # First (and only) column: one thread link per row.
        for j, link in enumerate(thread_links):
            sheet.write(j, 0, link)
        book2.save('tttt4.xls')

        time.sleep(0.3)
        df = pd.read_excel("tttt4.xls", header=None)
        url2 = df[0].tolist()

        # Spawn one worker per thread link; the semaphore caps concurrency
        # at 30 (acquired here, released inside get_alltitle).
        for k in range(len(url2)):
            thread_lock.acquire()
            t = threading.Thread(target=get_alltitle,
                                 args=(url2[k], k, folder_path2))
            t.start()
    except Exception as e:
        # Report the actual error instead of swallowing it silently.
        print('翻页出错', e)






























