"""参考视频   https://www.youtube.com/watch?v=VkD2Wx14p-U&list=PLwDQt7s1o9J4DqlcIaDLJPnP902mZs0Js&index=5&t=6s
           https://www.youtube.com/watch?v=Z2QvqN_RX6M
url = 'https://aregsr.vip/forum-253-'
国模套图 832页
"""

import os
import xlwt
import time
import pandas as pd

import requests
from requests.cookies import RequestsCookieJar
import urllib3
import re
import threading
import json

# Cap the number of concurrently running download threads at 30.
thread_lock = threading.Semaphore(30)


#
# # One-time automatic login via selenium — kept for reference:
# # NOTE(security): credentials are hard-coded below; move them out of source.
# drive = webdriver.Chrome()
# drive.get('https://aregsr.vip/')
# drive.find_element_by_id('goin').click()
# drive.find_element_by_id('ls_username').send_keys('ggfygg')
# drive.find_element_by_id('ls_password').send_keys('fhrAKMv@A4NXLkN')
# drive.find_element_by_class_name('mem_login').click()
# time.sleep(2)
#
# # Dump the selenium session cookies to disk:

#
# cookies = drive.get_cookies()
# with open("cookies.txt", "w") as fp:
#     json.dump(cookies, fp)
#
# The cookies saved above are re-loaded for requests below.
# Certificate verification is disabled below, so silence the resulting
# "insecure request" warnings up front.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

s = requests.session()
s.verify = False
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}

# Rebuild a cookie jar from the JSON dump produced by the (commented-out)
# selenium login step, so requests can reuse the authenticated session.
jar = RequestsCookieJar()
with open("cookies.txt", "r") as fp:
    for cookie in json.load(fp):
        jar.set(cookie['name'], cookie['value'])



url = 'https://aregsr.vip/forum-253-'


# Page-turning loop: visit every forum index page, collect the thread links
# on it, then download every image of every thread into a per-page folder.
# The forum has 832 pages (see module docstring), so the range must include
# page 832 — the original range(1, 832) silently skipped the last page.
for i in range(1, 833):
    time.sleep(0.1)  # small delay between index pages to go easy on the server
    try:
        url = "https://aregsr.vip/forum-253-{}.html".format(i)
        print(url)

        r = s.get(url, headers=headers, cookies=jar)
        r.encoding = "utf-8"
        html1 = r.text
        # Example anchor: </a>]</em> <a href="thread-3585492-1-831.html" onclick
        pattern = r'</a>]</em> <a href="(.*?)".*?'
        img_url = re.findall(pattern, html1)

        # Persist this page's thread links to an Excel sheet.  The file is
        # overwritten every page; kept only for compatibility with the
        # original workflow / manual inspection.
        book2 = xlwt.Workbook(encoding="utf-8", style_compression=0)
        sheet = book2.add_sheet('test01', cell_overwrite_ok=False)
        for j, link in enumerate(img_url):
            sheet.write(j, 0, link)
        book2.save('tttt7.xls')

        # One download folder per index page.
        folder_path2 = os.path.join("F:\\国模套图", str(i))
        if not os.path.exists(folder_path2):
            os.mkdir(folder_path2)

        # The original code re-read tttt7.xls with pandas to get this list
        # back; the in-memory list is identical, so use it directly.
        url2 = img_url

        # Thread-download module: open each thread page and pull its images.
        try:
            for k in range(len(url2)):
                print(url2[k])
                url3 = 'https://aregsr.vip/' + url2[k]

                r = s.get(url3, headers=headers, cookies=jar)
                r.encoding = "utf-8"
                html2 = r.text
                # Example: file="http://img7.uploadhouse.com/.../xxx.jpg" onmouseover=
                pattern1 = r'file="(.*?)".*?>'
                img_urls2 = re.findall(pattern1, html2)
                if img_urls2:
                    # Example: data-clipboard-text="【原创】...【22P】,https://..." >
                    pattern2 = r'data-clipboard-text=(.*)】'
                    file_names1 = re.search(pattern2, html2)
                    if file_names1 is None:
                        # No usable title on this page — the original code
                        # would have crashed on .group(); skip instead.
                        continue
                    # Strip quotes, drop the fixed-length prefix, and replace
                    # '/' (illegal in Windows folder names) with a full-width
                    # comma to build a usable folder name.
                    file_names2 = file_names1.group()
                    file_names3 = file_names2.replace('"', '')
                    file_names4 = file_names3[20:]
                    file_names5 = file_names4.replace('/', '，')
                    print(file_names5)

                    # One sub-folder per thread, under the page folder.
                    folder_path = os.path.join(folder_path2, file_names5)
                    if not os.path.exists(folder_path):
                        os.makedirs(folder_path)

                    # `dest` is bound at definition time: the original closure
                    # read the outer `folder_path` lazily, so worker threads
                    # still running when the next thread page was processed
                    # could save images into the wrong folder.
                    def get_allpic(url4, m, dest=folder_path):
                        try:
                            picture = s.get(url4, headers=headers,
                                            cookies=jar, timeout=3)
                            with open(os.path.join(dest,
                                                   '{}.jpg'.format(m + 1)),
                                      'wb') as f:
                                f.write(picture.content)
                        except Exception:
                            print('no')
                        finally:
                            # Always free the slot, even if the download died.
                            thread_lock.release()

                    # Download all images of this thread concurrently, bounded
                    # by the 30-slot semaphore.
                    for m in range(len(img_urls2)):
                        thread_lock.acquire()
                        t = threading.Thread(target=get_allpic,
                                             args=(img_urls2[m], m))
                        t.start()

        except Exception as e:
            # Log the failing thread URL (if any) so it can be retried later.
            bad = url2[k] if url2 else ''
            with open('./异常7.txt', 'a', encoding='utf-8') as fp:
                fp.write(str(e) + bad + '\n')
    except Exception as e:
        # Index-page fetch/parse failed; report and move on to the next page.
        print('翻页出错', e)































