import urllib.request
import os
import time
import re
import urllib.error

## Open a jandan.net page and return the raw response body.
def openJianDan(url):
    """Fetch *url* and return the response body as bytes, or -1 on failure.

    Scheme-relative URLs (``//host/...``) are prefixed with ``http:``.
    A desktop-browser User-Agent header is sent so the site serves the
    normal page instead of blocking the scraper.
    """
    if url.find('http:') == -1 :
        url = 'http:' + url
    try:
        req = urllib.request.Request(url)
        ## Pretend to be a desktop browser.
        req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36')
        ## BUG FIX: the original called urlopen(url), which discarded the
        ## Request object and its User-Agent header; pass the Request instead.
        response = urllib.request.urlopen(req,timeout=10)
        html = response.read()
        return html
    except Exception:
        print("糟糕，爬取网站的时候出现了点错误：本次的url是：" + url)
        return -1

## Download every image listed (one URL per line) in *fileName* into *path*.
def downloadImg(path,fileName):
    """Read image URLs from *fileName* and save each image under *path*.

    Files whose basename already exists in *path* are skipped.  Returns
    True when every image was either skipped or saved successfully,
    False if any fetch failed.
    """
    ## line counter, used only in the progress message
    count = 0
    ## flips to False on the first failed download
    isDownloading = True
    with open(fileName,'r') as imgList:
        for img in imgList:
            count +=1
            ## BUG FIX: the original concatenated with a hard-coded '\\'
            ## separator, producing broken paths on non-Windows systems;
            ## os.path.join is portable.  (Also removed a dead `pass`.)
            saveImgName = os.path.join(path, img.strip().split('/')[-1])
            if os.path.exists(saveImgName):
                print("该目录已存在，本次跳过" + str(count))
            else:
                html = openJianDan(img.strip())
                ## openJianDan returns -1 (an int) on failure
                if type(html) == int:
                    isDownloading = False
                else:
                    with open(saveImgName,'wb') as f:
                        f.write(html)
    return isDownloading
            
## Return the image addresses found on this page.
def findByImageList(html):
    """Return every .jpg image URL found in *html* (UTF-8 encoded bytes)."""
    ## Capture the src attribute of each <img> tag that ends in .jpg;
    ## ([^"]+) grabs everything inside the quotes up to the extension.
    pattern = re.compile(r'<img.+src="([^"]+\.jpg)')
    return pattern.findall(html.decode('utf-8'))

## Return the next page URL that has not been visited yet.
def seekNextUrl(html,urldict):
    """Return the first jandan.net page link in *html* (UTF-8 bytes) that
    is not already present in *urldict*, or -1 when every link was seen."""
    text = html.decode('utf-8')
    ## Anchor hrefs of the form //jandan.net/...#comments are page links.
    hrefRule = r'<a.+href="(//jandan.net/.+\#comments)'
    ## Lazily filter out already-visited links; -1 when none remain.
    unseen = (href for href in re.findall(hrefRule, text) if href not in urldict)
    return next(unseen, -1)