<<<<<<< HEAD
=======
<<<<<<< HEAD
import requests
import re
from urllib.request import urlretrieve  # 下载图片用
import urllib
import random
import time
import threading
import os
from requests.adapters import HTTPAdapter


# Pool of desktop-browser User-Agent strings; one is picked at random per run.
ua_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.114 Safari/537.36 Edg/103.0.1264.62',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36 SE 2.X MetaSr 1.0',
]

# Install a global opener so urllib.request.urlretrieve() sends a
# browser-like User-Agent header on every image download.
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', random.choice(ua_list))]
urllib.request.install_opener(opener)


def downPercent(num, size, total):
    """Progress hook for urlretrieve.

    Args:
        num: number of data blocks transferred so far.
        size: size of one data block in bytes.
        total: total size of the download in bytes; urlretrieve passes -1
            (or 0) when the server sends no Content-Length header.

    Prints the current percentage once the download is more than 95% done.
    """
    if total <= 0:
        # No usable total size -> no percentage to report.
        return
    result = min(1.0, num * size / total)  # the last block may overshoot the total
    if result > 0.95:  # only report near the end, as in the original design
        print("当前的进度:{}".format(result * 100))


def thre_num(total, num):
    """Split page numbers 1..total into roughly `num` (start, end) ranges.

    Each returned tuple (s, e) is consumed by down_image() as the slice
    urls[s-1:e-1], so the final tuple's end is bumped by 1 to include the
    last page.

    Args:
        total: total number of pages.
        num: desired number of thread ranges.

    Returns:
        list of (start, end) tuples.
    """
    pages = [x for x in range(1, total + 1)]  # page numbers 1..total
    # Step between split points. max(..., 1) fixes a crash in the original:
    # round(total / num) is 0 when total is much smaller than num, and a
    # slice step of 0 raises ValueError.
    point = max(1, round(total / num))
    pages = pages[0: total + 1: point]  # split points for the threads
    if len(pages) - 1 != num:  # rounding left the last range unassigned
        pages.append(total)  # close the final range at the last page
    pagenum = []
    for x in range(0, len(pages) - 1):  # -1 so pages[x + 1] never overflows
        # range()/slices are half-open, so widen only the FINAL range by one
        # so its last page survives the caller's urls[s-1:e-1] slice.
        end = pages[x + 1] if x != len(pages) - 2 else pages[x + 1] + 1
        pagenum.append((pages[x], end))
    return pagenum


def down_image(urls, s, e, save_folder):
    """Download cover images for the detail pages urls[s-1:e-1].

    For each page: fetch its HTML, take the first <img src="https://..."> as
    the image URL and the magnet link as the file name, then save the image
    to save_folder as <last 40 chars of magnet>.jpg via urlretrieve.

    Args:
        urls: list of detail-page URLs.
        s, e: 1-based (start, end) range from thre_num().
        save_folder: existing directory to write the .jpg files into.
    """
    ss = requests.Session()
    # Retry transient connection failures up to 5 times.
    ss.mount('http://', HTTPAdapter(max_retries=5))
    ss.mount('https://', HTTPAdapter(max_retries=5))
    for url in urls[s - 1:e - 1]:
        print("正在下载:{}".format(url))
        html = None
        try:
            # BUG FIX: the original called requests.get() here, so the retry
            # adapters mounted on the session never applied.
            html = ss.get(url, timeout=15).content.decode('utf-8')
            image_url = "https://" + re.findall(r'<img src="https://(.*?)"', html)[0]
            down = re.findall(r'<a href="https(.*?)"', html)[0]  # magnet link, used as file name
            path = os.path.join(save_folder, down[-40:] + ".jpg")
            urlretrieve(image_url, path)
        except Exception as exc:  # renamed: `e` shadowed the range parameter
            print(exc)
            if html is not None:  # html is unbound when the GET itself failed
                print(html)


if __name__ == '__main__':
    # Scrape listing pages 1..3; each gets its own output folder.
    for page in range(1, 4):
        save_folder = "F:\image_url\\{}".format(page)
        # makedirs(exist_ok=True): mkdir() crashed when the folder existed or
        # when the parent directory was missing.
        os.makedirs(save_folder, exist_ok=True)
        listing = requests.get('https://yj2207.link/pw/thread1022.php?fid=219&page={}'.format(page))
        text = listing.content.decode('utf-8')
        # De-duplicate the detail-page links, then build absolute URLs.
        links = set(re.findall(r'href="html_data/(.*?).html"', text))
        temp = ["https://yj2207.link/pw/html_data/" + l + ".html" for l in links]

        # Fan the downloads out over 4 worker threads.
        threads = []
        # Unpack (start, end) directly; the original reused the name `page`
        # here, shadowing the outer listing-page counter.
        for start, end in thre_num(len(temp), 4):
            t = threading.Thread(target=down_image, args=(temp, start, end, save_folder))
            threads.append(t)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
=======
from openpyxl import load_workbook
from openpyxl.utils import get_column_interval

# Workbook paths for the spreadsheet-comparison scratch work.
me = "D:\数据对比\\me.xlsx"
gy = "D:\数据对比\\gy.xlsx"

# Disabled draft: walk both workbooks' first sheets column by column,
# skip the first column, and dump every remaining cell value.
# wbme = load_workbook(me)
# wbgy = load_workbook(gy)
# list_me = []
# list_gy = []
# ws1 = wbme[wbme.sheetnames[0]]
# ws2 = wbgy[wbgy.sheetnames[0]]
# flag = 1
# print(get_column_interval(1,2))
# for i in ws1.columns:
#     if flag == 1:
#         flag = 2
#         continue
#     else:
#         print(i)
#         for k in i:
#             print(k.value)
#         print("--------------------------------")

# Sample climate-data file path, kept around for later use.
k = r"D:\qq接受的文件\Tencent Files\1229851357\FileRecv\重庆环科院\重庆环科院\SURF_CLI_CHN_MUL_DAY_V3.0\datasets\\PRE\SURF_CLI_CHN_MUL_DAY-PRE-13011-195103.TXT"
import os

# Quick interactive check of list.insert() semantics.
t = [1, 2, 3]
t.insert(1, 5)
print(t)
>>>>>>> 9b988744b9094e235d0206c22ab1011802914ecc
>>>>>>> 0f0c72be4dcc2e15fd778f6cbc949159bd679275
