# -*- coding:utf-8 -*-
import requests,imghdr,os,re
from concurrent.futures import ThreadPoolExecutor#,as_completed


# Single text encoding used for every decode/write in this module.
UNITY_ENCODING = 'UTF-8'
# Browser-like request headers so image hosts treat us as a normal client.
HEADERS = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
          ,'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' # content types the client can accept
          ,'Accept-Language': 'en-US,en;q=0.5' # languages the client prefers
          ,'Connection': 'keep-alive' # ask for a persistent connection
          }
def getHtmlByUrl(_url):
    """Fetch a URL and return its HTML body decoded with UNITY_ENCODING.

    Returns None (implicit) when the URL fails the reachability probe.
    """
    if not isUrlOK(_url):
        return
    # timeout prevents the scraper from hanging forever on a stalled host.
    _response = requests.get(url=_url, headers=HEADERS, timeout=30)
    # Decode the raw bytes explicitly so the module-wide encoding wins over
    # whatever requests would guess from the response headers.
    return _response.content.decode(UNITY_ENCODING)

def isUrlOK(_url):
    """Probe a URL with a HEAD request; return True when it looks reachable.

    403 is accepted alongside 200 — presumably some hosts reject HEAD or
    hot-linking yet still serve the subsequent GET (original behavior kept).
    A connection failure no longer raises: it is reported as "not good".
    """
    try:
        _status = requests.head(_url, timeout=30).status_code
    except requests.RequestException:
        _status = None
    if _status not in (200, 403):
        print('the url %s is not good.' % _url)
        return False
    return True

def downloadPic(_sourceUrl,_target_file):
    """Download an image and save it as <_target_file>.<detected extension>.

    The payload is first written to a ``.temp`` file; ``imghdr`` then sniffs
    the real image format so the final name carries the right extension.
    If the format cannot be detected, the ``.temp`` file is left in place
    (original behavior kept) so the bytes are not lost.
    """
    if not isUrlOK(_sourceUrl):
        return
    _response = requests.get(url=_sourceUrl, headers=HEADERS, timeout=60)
    _temp_name = _target_file + '.temp'
    # Context manager guarantees the handle is closed even if write() raises.
    with open(_temp_name, 'wb') as _pic:
        _pic.write(_response.content)
    # NOTE(review): imghdr is deprecated (removed in Python 3.13); consider
    # a content-sniffing replacement when upgrading the interpreter.
    pic_type = imghdr.what(_temp_name)
    if pic_type is not None:
        os.rename(_temp_name, _target_file + '.' + pic_type)

def downloadArchive(_sourceUrl,_target_file):
    """Download a binary resource and write it verbatim to _target_file."""
    if not isUrlOK(_sourceUrl):
        return
    _response = requests.get(url=_sourceUrl, headers=HEADERS, timeout=60)
    # Context manager closes the file even if write() raises.
    with open(_target_file, 'wb') as _arc:
        _arc.write(_response.content)

def getListOfTargetByHtmlViaExp(_html,_exp):
    """Return the list of all (group) matches of regex _exp in text _html."""
    return re.findall(_exp, _html)

#import io
#import sys
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') #改变标准输出的默认编码

def writeFile(_content,_file):
    """Write text _content to _file using the module-wide encoding.

    A None content still creates/truncates the file (original contract:
    the file was opened before the None check).
    """
    # "with" closes the handle even if write() raises — the original leaked
    # the descriptor on a write error.
    with open(_file, 'w', encoding=UNITY_ENCODING) as _f:
        if _content is not None:
            _f.write(_content)



'''全局字典'''
# Module-level scratch dictionary ("global dict"); only read by
# commented-out debug code in this file.
k_v = {}
'''计数'''
# Page counter ("count"); only incremented by commented-out debug code.
total_num = 0

def processOneUrl(_url):
    """Scrape one listing page: extract all image URLs and download them.

    Images land in ./temp/ named by their index on the page; downloadPic
    appends the real extension after sniffing the image format.
    """
    html = getHtmlByUrl(_url)
    if html is None:
        # Unreachable page — nothing to parse (original crashed here).
        return
    # Target the lazily-loaded full-size <img> tags of the listing page.
    exp = '''<img loading="lazy" class="alignnone size-full" src="(.+?)" alt="'''
    urlInfoList = getListOfTargetByHtmlViaExp(html, exp)
    print(urlInfoList)
    # downloadPic assumes the target directory exists; create it up front.
    os.makedirs('./temp', exist_ok=True)
    with ThreadPoolExecutor(max_workers=8) as pool:
        for idx, pic_url in enumerate(urlInfoList):
            pool.submit(downloadPic, pic_url, './temp/' + str(idx))
        # The with-block waits for all workers on exit; the explicit
        # pool.shutdown() of the original was redundant.



def mainProcess():
    """Entry point: iterate the hard-coded listing URLs and scrape each one."""
    urlList = ['http://tzlure.com/luresxj.html']
    print(urlList)
    for _u in urlList:
        processOneUrl(_u)

if __name__ == '__main__':
    #mainProcess()
    # Currently exercises only the single-file download path, not the
    # full scraping pipeline above.
    downloadArchive(r'https://img2.goodfon.com/original/2560x1440/2/e7/devushka-sport-ganteli-shorty.jpg','./devushka-sport-ganteli-shorty.jpg')
