import requests
import threading
import time
from bs4 import BeautifulSoup
import os
import uuid


class myThread (threading.Thread):
    """Worker thread that performs one full crawl pass via myTask()."""

    def __init__(self, threadID, name, counter):
        super().__init__()
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # Invoked automatically on the new thread after start().
        print("Starting " + self.name)
        myTask()
        print("Exiting " + self.name)


# =====================================================================================================Task

LinkList = []  # page URLs collected by getPageUrl(); shared by all worker threads (no locking — NOTE(review): concurrent appends, confirm this is acceptable)


def myTask():
    """Crawl the index page, collect its links, then print every image URL
    found on each linked page.

    Downloading is disabled (download_img call left commented out).
    """
    text = get_index("https://pmdaohang.com/")
    if text is None:
        # Index fetch failed — get_index returns None on errors/non-200.
        return
    getPageUrl(text)
    for index in LinkList:
        page = get_index(index)
        if page is None:
            continue  # skip pages that failed to download
        linkList = GetPageImgUrl(page)
        print(linkList)
        for img in linkList:
            print(img)
            # download_img(img)

# url = "https://pmdaohang.com/"


# 获取服务器的页面数据
# Fetch a page from the server
def get_index(url):
    """Return the body of *url* decoded as UTF-8, or None on any failure.

    None is returned both for network/parse errors and for non-200 status
    codes, so callers must check the result before using it.
    """
    try:
        # timeout keeps a worker thread from blocking forever on a dead host
        respose = requests.get(url, timeout=10)
        respose.encoding = 'utf-8'
        if respose.status_code == 200:
            return respose.text
        return None  # explicit: non-200 responses yield None
    except Exception as e:
        print('获取网页数据异常：'+str(e))
        return None


# 获取页面的所有连接
# Collect all links on the page
def getPageUrl(text):
    """Append every distinct <a href> value found in *text* to the global
    LinkList, then print the list's new length.

    Anchors without an href attribute are skipped — previously their None
    value was appended and later passed to requests.get().
    """
    try:
        soup = BeautifulSoup(text, "html.parser")
        for anchor in soup.findAll('a'):
            href = anchor.get('href')
            if href is None:
                continue  # <a> with no href attribute
            if href not in LinkList:
                LinkList.append(href)
        print(len(LinkList))
    except Exception as e:
        print('error' + str(e))


# 获取页面的所有图片连接
# Collect all image links on the page
def GetPageImgUrl(text):
    """Return a de-duplicated list of <img src> URLs found in *text*.

    Returns whatever was collected so far (possibly empty) if parsing fails.
    """
    LinkListImg = []
    try:
        soup = BeautifulSoup(text, "html.parser")
        images = soup.findAll('img')
        # Bug fix: the original passed the count as a second print() argument
        # alongside a C-style format string, printing "...%d <n>" verbatim.
        print('page image size:%d' % len(images))
        for img in images:
            src = img.get('src')
            # skip <img> tags without src, and duplicates
            if src is not None and src not in LinkListImg:
                LinkListImg.append(src)
    except Exception as e:
        print('error:' + str(e))
    return LinkListImg


# Download directory root; download_img() creates a fresh uuid-named
# subfolder under it for every file it saves. Windows-style path.
root_path = 'F:/test2/'
#  下载图片
def download_img(url):
    pathname = uuid.uuid1()
    root = root_path + str(pathname)+"/"
    try:
        path = root + url.split("/")[-1]
        print(url)
        if not os.path.exists(root):
            os.makedirs(root)
        if not os.path.exists(path):
            r = requests.get(url)
            r.raise_for_status()
            print('picture name :'+path + ' dir path :'+root)
            # 使用with语句可以不用自己手动关闭已经打开的文件流
            with open(path, "wb") as f:  # 开始写文件，wb代表写二进制文件
                f.write(r.content)
            print("爬取完成")
        else:
            print("文件已存在")
    except Exception as e:
        print("爬取失败:" + str(e))


# hom
# Entry point: spawn 100 worker threads, each running the crawl once.
def main():
    for idx in range(100):
        worker = myThread(idx, "Thread-" + str(idx), idx)
        worker.start()


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()

