import requests
import re
import os
import time
from urllib import request
from multiprocessing import Pool
import multiprocessing
import urllib3
urllib3.disable_warnings()


class Spider:

    def __init__(self, url_info):
        """Remember the target catalogue URL and prepare shared request state.

        url_info: the listing-page URL of one supported shop site.
        """
        self.base_url = url_info
        # Browser-like headers so the shops do not reject the scraper outright.
        self.headers = {
            'user-agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                           '(KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'),
            'accept': ('text/html,application/xhtml+xml,application/xml;q=0.9,'
                       'image/webp,image/apng,*/*;q=0.8'),
        }
        # URLs already downloaded (NOTE: per-process only — see download_urls).
        self.sets = set()
    def files_name(self):
        """Return a millisecond-resolution timestamp string used as a file name."""
        return str(int(time.time() * 1000))

    def parse(self, data=None):
        """Fetch ``self.base_url`` and extract image URLs for the matching shop.

        Dispatches on which shop the URL belongs to.  The return shape varies
        by branch (callers must know which site they asked for):
          * a list of image URLs, or
          * an int page count (paginated sites; pages fetched by
            ``download_image``), or
          * a ``(list_of_urls, item_count_info)`` tuple.
        Returns ``None`` when no branch matches or on a non-200 response.

        data: optional POST payload, used only by the www.une-nana-cool.com
              branch.
        Exits the process on any request/parse exception.
        """
        print('---正在请求网址%s-----' % self.base_url)
        if 'www.ae.com' in self.base_url:
            # aerie: product grid, images in data-srcset.
            try:
                response = requests.get(self.base_url, headers=self.headers, timeout=5, verify=False)
                if response.status_code == 200:
                    res = re.compile(r'class="product-list">(.+?)<div class="filters-noresult">', re.S).findall(response.text)[0]
                    res_find = re.compile(r'class="img-placeholder".+?<img class="item active product-image product-image-front img-responsive lazyload".+?data-srcset="(.+?)">', re.S).findall(res)
                    return res_find
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.yse-paris.com' in self.base_url:
            # yse-paris requires Host/Referer headers to serve the page.
            self.headers['Host'] = 'www.yse-paris.com'
            self.headers['Referer'] = 'https://www.yse-paris.com/fr/lookbooks'
            try:
                response = requests.get(self.base_url, headers=self.headers, timeout=5, verify=False)
                if response.status_code == 200:
                    res = re.compile(r'category__product-list js-product-list">(.+?)<div id="footer-container">', re.S).findall(response.text)
                    res_find = re.compile(r'<img class="product__img lazyload".+?data-src="(.+?)"').findall(res[0])
                    return res_find
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.freepeople.com' in self.base_url:
            # freepeople is paginated: this branch only returns the page count;
            # download_image() fetches the individual pages.
            try:
                # NOTE(review): hard-coded session cookie captured in 2018 —
                # almost certainly expired; refresh before relying on it.
                # Fix: the original literal was broken across four physical
                # lines (a SyntaxError); rebuilt byte-identically via implicit
                # string concatenation.
                self.headers['cookie'] = (
                    'SSLB=1; split_tag_control=Google_DBM; FP_ATTR=other; _abck=BA4C30D532C0832FCC6B1ACE4F0B05563ADE1E2C2E340000932DEA5B14B36932~0~/0kWhEO1MKc4ai0MOFv+2spI0aXd8KjAKs1rXI3Fdrs=~-1~-1; _ga=GA1.2.1988059593.1542073751; urbn_email_signup_marketing_optin=true; _dy_cs_pinterest=true; SS_AFTERPAY=2; SSSC_A15=513.G6623156311410430772.3|36039.1046629:37328.1099788:37750.1120335:39619.1219143:39855.1228407; urbn_country=CN; urbn_currency=CNY; urbn_tracer=66DCDUACOR; siteId=fp-cn; urbn_edgescape_site_id=fp-cn; urbn_inventory_pool=INTL_DIRECT; urbn_language=zh-CN; AKA_A2=A; urbn_data_center_id=US-NV; urbn_channel=web; urbn_site_id=fp-cn; urbn_geo_region=AS-SG; ak_bmsc=C1E72CA34ACF17E9143034FEE21F90043ADE1E2F054B0000CB8FF35B0E337419~plyPUHZFZx5LasQ89ZxEIwU6BaaWAzVH0GyFuyYIyqNr/OuCc8YyT8czpRG3mpMoNpWzWv0h5cEFKC0vLlSsJtRr4chyelzarwDrTn/r+8X1UhpN6E4elZe8OmnZDKs4sq+FIxO6ppjjUv5kg4N4NmJNRJjI6GPNQRyumH6jv68Q9PaJiGpPVwgkwnll4yTj7+SzobDYQE3zx2/rRaSPN6zsRb75/89VxeSwmrKZr41Qo=; akacd_ss1=3720141513~rv=63~id=45aa97a60b9010e08a4f0b5e5833b302; bm_sz=F8710BFB6C156C110EA5FF0EC273D580~QAAQLx7eOloiWxdnAQAAwrFpL5Uekq6YZ1zRWSAWZRoeUv+YgCTuOiyk8D/8z7EqnD7zWIFRsE9+wY5ezGyodOvL32JS+JBjJterS3O939lDt0dMFcFBc8cdVFhMnk8Bu0kjwmvpVbo8R2VTQc3zRuYDvbsRGNGOdW10kWxip1bzduKQ92GDf846Fw3DqBwuRVUK; _gid=GA1.2.183162667.1542688716; _pxvid=257f3c10-ec7e-11e8-a89e-1d51f543f94d; urbn_search_provider=GBY; SSID_BE=CABBOh1GAAAAAACTLepbNFOBAZMt6lsDAAAAAAB2xNRdyo_zWwDRnK-bAAN3vhIAyo_zWwEAw5oAA0eaEgCTLepbAwB2kwADTxgRAJMt6lsDANCRAAMMyBAAyo_zWwEAx4wAA2X4DwCTLepbAwA; urbn_device_type=SMALL; SSRT_A15=bpLzWwADAA; '
                    'urbn_auth_payload=%7B%22authToken%22%3A%20%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJmcCIsImlhdCI6MTU0MjY4OTM5MC43MzIwMTcsImRhdGEiOiJ7XCJjYXJ0SWRcIjogXCIxYXg1dmFFaURQaTVIUHhGQktnZklSMzl4cGxKTXFicnhCZkVYTzJ5WHRUc2VHbnlxQ1lnd3VPT2xSaUpac3E0NVUrM3JzMnJ4aGptNHBEMzBDY0lCdz09N2RiOWEwZWY5MGZhYTk4MDIyMTk5YWQ3MGU0N2UzZmZhZGQ3OGQxYTg4NjgwOWQ1ZWE4NzQ4MjE2MWU5YmQyYVwiLCBcIndlYklkXCI6IFwiMzMwYmU0MTYtNmZkZi00OTJkLThlZjctYWU0N2RmODkxZDNiXCIsIFwiZWRnZXNjYXBlXCI6IHtcInJlZ2lvbkNvZGVcIjogXCJaSlwifSwgXCJicmFuZElkXCI6IFwiZnBcIiwgXCJkYXRhQ2VudGVySWRcIjogXCJVUy1OVlwiLCBcInNpdGVJZFwiOiBcImZwLXVzXCIsIFwiYW5vbnltb3VzXCI6IHRydWUsIFwic2NvcGVcIjogW1wiR1VFU1RcIl0sIFwiY3JlYXRlZFRpbWVcIjogMTU0MjA3Mzc0Ny42MzQ2NzgsIFwicHJvZmlsZUlkXCI6IFwiWmJyUnF3ZS9mS1ZLcWJxdzhtVkVaNm9icnh6OW9nV1NjNUhYc1NLOVJjUnRKdEZ4SmN1TnZJZ0tZNHZXWnZwRzVVKzNyczJyeGhqbTRwRDMwQ2NJQnc9PTk3NTdhMDQ3ZDc4OTVkOGM2ZjEyN2M0M2E1NDQzZDhjNGUzOTg0ODU5MjIyNWI4YjM5YjQyY2M1MDM3OGY4MjlcIiwgXCJ0cmFjZXJcIjogXCI2NkRDRFVBQ09SXCIsIFwiZ2VvUmVnaW9uXCI6IFwiQVMtU0dcIiwgXCJzaXRlR3JvdXBcIjogXCJmcFwifSIsImV4cCI6MTU0MjY4OTk5MC43MzIwMTd9.IFy0UjURfyfRDqL2Oy2L-PfEYsjdUN_THxum774PpUI%22%2C%20%22reauthToken%22%3A%20%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJmcCIsImlhdCI6MTU0MjY4OTM5MC43MzI0NTMsImRhdGEiOiJ7XCJjcmVhdGVkVGltZVwiOiAxNTQyNjg5MzkwLjczMjQzNywgXCJzY29wZVwiOiBbXCJHVUVTVFwiXSwgXCJ0cmFjZXJcIjogXCI2NkRDRFVBQ09SXCIsIFwicHJvZmlsZUlkXCI6IFwiWmJyUnF3ZS9mS1ZLcWJxdzhtVkVaNm9icnh6OW9nV1NjNUhYc1NLOVJjUnRKdEZ4SmN1TnZJZ0tZNHZXWnZwRzVVKzNyczJyeGhqbTRwRDMwQ2NJQnc9PTk3NTdhMDQ3ZDc4OTVkOGM2ZjEyN2M0M2E1NDQzZDhjNGUzOTg0ODU5MjIyNWI4YjM5YjQyY2M1MDM3OGY4MjlcIn0iLCJleHAiOjE1NTgyNDEzOTAuNzMyNDUzfQ.05PzQHk7sumuOBN3etAFbF6oJdFBPqzBkOCOd0WHW8M%22%2C%20%22expiresIn%22%3A%20600.0%2C%20%22reauthExpiresIn%22%3A%2015552000.0%2C%20%22scope%22%3A%20%22GUEST%22%2C%20%22tracer%22%3A%20%2266DCDUACOR%22%2C%20%22dataCenterId%22%3A%20%22US-NV%22%2C%20%22geoRegion%22%3A%20%22AS-SG%22%2C%20%22createdAt%22%3A%201542689390738.5554%2C%20%22authExpiresTime%22%3A%201542689870.738557%2C%20'
                    '%22reauthExpiresTime%22%3A%201558241390.738559%7D; urbn_personalization_context=%5B%5B%22device_type%22%2C%20%22SMALL%22%5D%2C%20%5B%22personalization%22%2C%20%5B%5B%22ab%22%2C%20%5B%5B%22SS_AFTERPAY%22%2C%202%5D%5D%5D%2C%20%5B%22experience%22%2C%20%5B%5B%22image_quality%22%2C%2050%5D%2C%20%5B%22reduced%22%2C%20true%5D%5D%5D%2C%20%5B%22initialized%22%2C%20false%5D%2C%20%5B%22isCallCenterSession%22%2C%20false%5D%2C%20%5B%22isSiteOutsideNorthAmerica%22%2C%20true%5D%2C%20%5B%22isSiteOutsideUSA%22%2C%20true%5D%2C%20%5B%22isViewingInEnglish%22%2C%20false%5D%2C%20%5B%22isViewingRegionalSite%22%2C%20true%5D%2C%20%5B%22loyalty%22%2C%20false%5D%2C%20%5B%22loyaltyPoints%22%2C%20%22%22%5D%2C%20%5B%22siteDown%22%2C%20false%5D%2C%20%5B%22thirdParty%22%2C%20%5B%5B%22dynamicYield%22%2C%20true%5D%2C%20%5B%22googleMaps%22%2C%20true%5D%2C%20%5B%22moduleImages%22%2C%20true%5D%2C%20%5B%22personalizationQs%22%2C%20%22%22%5D%2C%20%5B%22productImages%22%2C%20true%5D%2C%20%5B%22promoBanners%22%2C%20true%5D%2C%20%5B%22tealium%22%2C%20true%5D%5D%5D%2C%20%5B%22userHasAgreedToCookies%22%2C%20false%5D%5D%5D%2C%20%5B%22scope%22%2C%20%22GUEST%22%5D%2C%20%5B%22user_location%22%2C%20%2217207ba5a0411d708b5f5ebc82b1e660%22%5D%5D; urbn_page_visits_count=%7B%22fp-cn%22%3A7%7D; _gat_tealium_0=1; utag_main=v_id:01670ac20fc6003ecf4c2e59541403072002106a00bd0$_sn:3$_ss:0$_st:1542691191866$ses_id:1542688715992%3Bexp-session$_pn:8%3Bexp-session; stc114946=env:1542688716%7C20181221043836%7C20181120051951%7C7%7C1044753:20191120044951|uid:1542073751642.1900106033.1588197.114946.1094320709:20191120044951|srchist:1044753%3A1542688716%3A20181221043836:20191120044951|tsa:1542688716257.1568798979.1532102.6803290756511806.:20181120051951; bm_sv=95919CB237359FB8C1551EA97812EED3~NwA9WZTfeEPZJF5jSWQnpQ2+pG7wwxfy3IKq9sOvhsnGWyq5W42yshi5QdraeAlPjsd1wXFwKDuDGLDsfYHOU9dAIDteAQ5QyiYvO9a2WImVsU8COXvarrMVoFYdhIb5vhW7R/P+XuWRLP2lmVh7Oj7LAcmX+N4NaBIJAmVTVXs=; '
                    '_px3=7fbd3781a1e6d2f12a26c518e7377f072d92a5fc548d2532c7d27a93ae3ec85b:bbpp3TM0a4opBY1SWanZHQfdvNNn7fNZuNrbcuSWkAQXWEf7DDQAWImpV3Xsp0h8TbfmeGKInWKuxINvQyF+fg==:1000:5/KX9p3U2YODm9Yoa2Xy+yg/gIpGnnzZN8dR8YMvzhQ9fQQluLB5Tv5gD/u2+8yy4sioWJ2TRrOo45yj2rY/lvQuEVSr1BaRtDBcyOqz2SiL73zUUnjFsVmGASiCmLnc4M4E/tmziCgCuObXN67qbJAkeg1genvNzKIpEchlOO0=; RT="sl=7&ss=1542688713306&tt=39491&obo=1&sh=1542689409372%3D7%3A1%3A39491%2C1542689051905%3D6%3A0%3A39491%2C1542689025858%3D5%3A0%3A35282%2C1542689010406%3D4%3A0%3A33204%2C1542688889854%3D3%3A0%3A28595&dm=freepeople.com&si=28d090b5-7eb5-46d3-9611-e92a00e8e5f5&bcn=%2F%2F36fb78dc.akstat.io%2F&ld=1542689409372&r=https%3A%2F%2Fwww.freepeople.com%2Fchina%2Fxinpin%2F&ul=1542689409374"'
                )
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    page = re.compile(r'o-pagination__li o-pagination__number--next">(.+?)</li>', re.S).findall(response.text)
                    if page:
                        page_count = page[0].strip()
                        print('该网站共%s页' % page_count)
                        return int(page_count)
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'http://valentinenyc.com' in self.base_url:
            # valentinenyc: plain <img src> tags inside the product sections.
            try:
                response = requests.get(self.base_url, headers=self.headers, timeout=5, verify=False)
                if response.status_code == 200:
                    res_html = re.compile(r'document">(.+?)<div class="footer-wrapper">', re.S).findall(response.text)
                    res_text = re.compile(r'container product-section">(.+)<div class="container product-section">',
                                          re.S).findall(res_html[0])
                    res_sub = re.compile(r'img src="(.+?)"').findall(res_text[0])
                    return res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'princessetamtam' in self.base_url:
            # princessetamtam: returns (image_urls, item_markers) tuple.
            try:
                response = requests.get(self.base_url, headers=self.headers, timeout=5, verify=False)
                if response.status_code == 200:
                    # Fix: the original issued a second, redundant GET here;
                    # reuse the response we already have.
                    res_xz = re.compile(r'product-tile-thumb animate fade-in " src="(.+?)".+?data', re.S).findall(response.text)
                    res_pe = re.compile(r'<img src="(.+?)" alt="Soutien-gorge').findall(response.text)
                    res_items = re.compile(r'product-item col-xs-6 col-sm-3').findall(response.text)
                    res_url_count = res_xz + res_pe
                    return res_url_count, res_items
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.oysho.cn' in self.base_url:
            # oysho: JSON product feed; build static-CDN image URLs.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res = response.json()
                    items_list = res.get('products')
                    res_sub = []
                    for key, item in enumerate(items_list):
                        # Some products nest their media under
                        # bundleProductSummaries instead of detail.xmedia.
                        if item.get('detail').get('xmedia') is not None:
                            path = item.get('detail').get('xmedia')[0].get('path')
                            idmedia_list = item.get('detail').get('xmedia')[0].get('xmediaItems')[0].get('medias')
                        else:
                            path = item.get('bundleProductSummaries')[0].get('detail').get('xmedia')[0].get('path')
                            idmedia_list = \
                            item.get('bundleProductSummaries')[0].get('detail').get('xmedia')[0].get('xmediaItems')[0].get(
                                'medias')
                        for idmedia in idmedia_list:
                            idme = 'https://static.oysho.net/6/photos2' + path + '/' + idmedia.get('idMedia') + '6' + '.jpg'
                            print(key, idme)
                            # _11_1 / _13_1 variants are unwanted shots.
                            if '_11_1' in idme or '_13_1' in idme:
                                pass
                            else:
                                res_sub.append(idme)
                    return res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'stellamccartney' in self.base_url:
            # stellamccartney: images appear in both srcset and an HTML-escaped
            # srcset (&quot;) variant; collect both.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_list = re.compile(r'<li class="products-item   ".+?>(.+?)</li>', re.S).findall(response.text)
                    res_sub = []
                    for res in res_list:
                        res_ = re.compile(r'img .+?srcset="(.+?) .+?"').findall(res)
                        if res_:
                            res_img1 = res_[0]
                            res_sub.append(res_img1)
                        res_f = re.compile(r'img .+?srcset=&quot;(.+?) .+?,').findall(res)
                        if res_f and isinstance(res_f, list):
                            res_img2 = res_f[0]
                            res_sub.append(res_img2)
                    return res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.eberjey.com' in self.base_url:
            # eberjey: each match yields (alt-image, main-image); flatten.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_list = re.compile(r'<ol class="products list items product-items">(.+?)</ol>', re.S).findall(
                        response.text)
                    res_sub = re.compile(r'data-altimage="(.+?)".+?<img.+?src="(.+?)"', re.S).findall(res_list[0])
                    res_find = []
                    for i in res_sub:
                        for j in i:
                            res_find.append(j)
                    return res_find
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.lilipiache.com' in self.base_url:
            # lilipiache is paginated: return the last page number
            # (second-to-last pagination link); pages go to download_image().
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_list = re.compile(r'ul class="pagination">(.+?)</ul>', re.S).findall(response.text)
                    pange_count = re.compile(r'<a .+?>(.+?)</a', re.S).findall(res_list[0])
                    return int(pange_count[-2])
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'forloveandlemons.com' in self.base_url:
            # forloveandlemons: JSON-ish inline data; prepend scheme if missing.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    large_image = re.compile(r'"large_image": "(.+?)".+?"large_hover_image": "(.+?)"', re.S).findall(
                        response.text)
                    res_sub = []
                    for image in large_image:
                        for item in image:
                            ima = 'http:' + item if 'https' not in item else str(item)
                            res_sub.append(ima)
                    return res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'hollisterco.cn' in self.base_url:
            # hollister: derive zoom + two model shots from each thumbnail URL.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_sub = re.compile(r'thumb-link lazy-loader.+?>.+?<img src="(.+?)".+?</a>', re.S).findall(
                        response.text)
                    large_resub = [i.replace('category', 'productMainZoom') for i in res_sub]
                    model_image = [i.replace('prod1', 'model1') for i in large_resub]
                    model2_image = [i.replace('prod1', 'model2') for i in large_resub]
                    count_image = large_resub + model_image + model2_image
                    return count_image, res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.une-nana-cool.com' in self.base_url:
            # une-nana-cool: POST endpoint; swap thumbnail (M1) for large (B1).
            try:
                response = requests.post(self.base_url, data=data, headers=self.headers, verify=False)
                if response.status_code == 200:
                    image_urls = re.compile(r'<figure><img src=\'(.+?)\'').findall(response.text)
                    images = [i.replace('\\', '').replace('M1', 'B1') for i in image_urls]
                    return images
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.katthelabel.com' in self.base_url:
            # katthelabel is paginated: return the page count.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_c = re.compile(r'pagination-custom">(.+?)</ul>', re.S).findall(response.text)
                    res_items = re.compile(r'<a.+?>(.+?)</a>.+?title="Next &raquo;">', re.S).findall(res_c[0])[0]
                    return int(res_items)
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.mimiholliday.cn' in self.base_url:
            # mimiholliday: returns (image_urls, <li> markers) tuple.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res = re.compile(r'AjaxinateLoop.+?>(.+?)</ul>', re.S).findall(response.text)
                    res_list = re.compile(r'<img.+?src="(.+?)">', re.S).findall(res[0])
                    res_count = re.compile(r'<li.+?>', re.S).findall(res[0])
                    res_sub = []
                    for item in res_list:
                        img_url = 'https:' + item
                        res_sub.append(img_url)
                    return res_sub, res_count
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'sloaneandtate.com' in self.base_url:
            # sloaneandtate: two images per product; returns (urls, item count).
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res = re.compile(r'product three-per-row">.+?<img.+?src="(.+?)".+?>.+?<img.+?src="(.+?)".+?>',
                                     re.S).findall(response.text)
                    item_counts = re.compile(r'product three-per-row', re.S).findall(response.text)
                    items = len(item_counts)
                    res_sub = []
                    for item in res:
                        for it in item:
                            it_url = 'http:' + it
                            res_sub.append(it_url)
                    return res_sub, int(items)
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'fleurofengland.com' in self.base_url:
            # fleurofengland: swap CDN path segment small->large image size.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res = re.compile(r'products list items product-items">(.+?)</ol>', re.S).findall(response.text)
                    url_small = re.compile(r'<img.+?src="(.+?)".+?>', re.S).findall(res[0])
                    items_count = len(re.compile(r'<li.+?>', re.S).findall(res[0]))
                    res_sub = [url.replace('small_image/298x398/beff4985b56e3afdbeabfc89641a4582',
                                           'image/595x817/1ac472b2e3bed24f4b7f75082897d970') for url in url_small]
                    return res_sub, int(items_count)
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'heidiklumintimates.com' in self.base_url:
            # heidiklum: XML-ish feed with three <images> per result; upscale
            # 220w thumbnails to 1010w.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_sub = []
                    res_list = re.compile(
                        r'<results>.*?<images>//(..*?)</images>.*?<images>//(.*?)</images>.*?<images>//(.*?)</images>.*?</results>').findall(
                        response.text)
                    for items in res_list:
                        for item in items:
                            res_sub.append('http://' + item.replace('.220w', '.1010w'))
                    return res_sub, int(len(res_list))
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.agentprovocateur.com' in self.base_url:
            # agentprovocateur: swap 225px thumbs for 500px images.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_list = re.compile(r'products-grid".*?>(.*?)</ol>', re.S).findall(response.text)
                    small_images = re.compile(r'<img.*?src="(.+?)".*?>', re.S).findall(res_list[0])
                    res_items = re.compile(r'col-lg-3 col-md-3 col-sm-4 col-xs-6 item', re.S).findall(response.text)
                    res_sub = [i.replace('small_image/225x', 'image/500x') for i in small_images]
                    return res_sub, int(len(res_items))
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()
        elif 'www.thereformation.com' in self.base_url:
            # thereformation: hover + main image per product; flatten pairs.
            try:
                response = requests.get(self.base_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res_list = re.compile(r"pagination'.+?>(.+?)<p class='visually-hidden'>", re.S).findall(response.text)
                    url_images = re.compile(r'data-product-hover-image="(.*?)".*?data-src="(.*?)"', re.S).findall(res_list[0])
                    res_items = re.compile(r'product-summary__media-link', re.S).findall(res_list[0])
                    res_sub = []
                    for urls in url_images:
                        for url in urls:
                            res_sub.append(url)
                    return res_sub, int(len(res_items))
                else:
                    print('服务器无响应，随后再试')
            except Exception:
                print('请求超时')
                os.sys.exit()









    def download_image(self, current_url):  # these sites are paginated: parse() returned a page count, this fetches one page
        """Fetch one listing page of a paginated site and return its image URLs.

        current_url: the URL of a single catalogue page (built by the caller
        from the page count that parse() returned).
        Returns a list of image URL strings, or None when no branch matches
        or the response is not 200; exits the process on request errors.
        """
        if 'www.freepeople.com' in current_url:
            try:
                res = requests.get(current_url, headers=self.headers, verify=False)
                if res.status_code == 200:
                    # Isolate the product grid, then pull each main <img src>.
                    res_html = re.compile(r'data-qa-product-grid>(.+?)<div class="o-row">', re.S).findall(res.text)
                    res_sub = re.compile(r'<img.+?src="(.+?)".+?data-alt-src=', re.S).findall(res_html[0])
                    return res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception as e:
                print('请求超时')
                os.sys.exit()
        elif 'www.lilipiache.com' in current_url:
            try:
                response = requests.get(current_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    # Product list sits between the grid wrapper and pagination.
                    res = re.compile(r'collection-products rows-of-3">(.+?)<ul class="pagination">', re.S).findall(
                        response.text)
                    res_list = re.compile(r'img src="(.+?)"').findall(res[0])
                    # Image URLs are protocol-relative; prepend a scheme.
                    res_list = ['http:'+i for i in res_list]
                    return res_list
                else:
                    print('服务器无响应，随后再试')
            except Exception as e:
                print('请求超时')
                os.sys.exit()
        elif 'www.katthelabel.com' in current_url:
            try:
                response = requests.get(current_url, headers=self.headers, verify=False)
                if response.status_code == 200:
                    res = re.compile(r'grid-uniform image_autoheight_enable">(.+?)<hr>', re.S).findall(response.text)
                    res_s = re.compile(r'<img src="(.+?)".+?>', re.S).findall(res[0])
                    # Protocol-relative URLs here too.
                    res_sub = ['http:' + i for i in res_s]
                    return res_sub
                else:
                    print('服务器无响应，随后再试')
            except Exception as e:
                print('请求超时')
                os.sys.exit()


    def download_urls(self, urls, page, c):
        """Download one image URL into today's dated directory.

        Runs inside a Pool worker process.

        urls: a single image URL (str, despite the plural name).
        page: 1-based index of this image (progress display only).
        c:    total number of images (progress display only).
        """
        senyu = int(c) - int(page)  # images remaining, for the progress print
        t = time.strftime('%Y-%m-%d', time.localtime())
        img = 'D:/共享文件/爬虫图片不要改名否则无效'
        # img = 'img'
        img_day = img + '/' + t
        # Per-site subdirectory named after the host part of base_url.
        img_name = img_day + '/' + re.compile(r'^(https|http)://(.+?)/').search(self.base_url).group(2)
        # Fix: the original nested if/else could only create one directory
        # level per run (e.g. when `img` existed but the day folder did not,
        # `img_name` was never created and open() below failed).  makedirs
        # creates the whole chain idempotently.
        os.makedirs(img_name, exist_ok=True)
        path_name = img_name + '/'
        print(urls)
        print('正在下载第%s张图片,剩余%s' % (page, senyu))
        # NOTE(review): this Lock is created fresh on every call, so it never
        # actually excludes anything; likewise self.sets is copied into each
        # worker process, so deduplication is per-process at best.  Kept for
        # behavioural parity — a shared Manager()-backed set would be the real
        # fix.
        lock = multiprocessing.Lock()
        lock.acquire()
        try:
            if urls not in self.sets:
                res = requests.get(urls, verify=False)
                if res.status_code == 200:
                    target = path_name + self.files_name() + '.png'
                    with open(target, 'wb') as f:
                        f.write(res.content)
                    self.sets.add(urls)
                else:
                    print('服务器响应失败,下载失败')
            else:
                print('已经抓过了')
        finally:
            lock.release()
    def run(self, res_urls):
        """Fan the parsed image URLs out to a 20-worker process pool.

        res_urls: iterable of image URL strings produced by parse() /
        download_image().  URLs from unrecognised sources are skipped.
        """
        # Sources whose URLs are downloadable exactly as parsed.  This
        # replaces ~17 duplicated elif branches that all did the same thing.
        passthrough = (
            'valentine', 'princessetamtam', 'oysho', 'stellamccartney',
            'eberjey', '/1693/', '/0271/', 'quality=80', 'anf.scene7.com',
            'www.wacoal.jp', '/1845/', '/0948/', '/0241/',
            'd1ss97vo2n62dr.cloudfront.net', 'image-cdn.symphonycommerce.com',
            'agentprovocateur.com', 'res.cloudinary.com',
        )
        p = Pool(20)
        pages = len(res_urls)
        print('共%s张图片' % pages)
        j = 1       # current image index (display counter)
        c = pages   # total image count
        for i in res_urls:
            if 'aerie' in i:
                # aerie: first srcset entry, swapped to the large-image preset.
                img_link = 'https:' + re.compile(r'(.+?) ').findall(i)[0].replace('$cat-main_small$',
                                                                                 '$PDP_78_Main$').strip()
            elif 'yse' in i:
                # checked before 'freepeople' to preserve the original
                # branch priority.
                img_link = i
            elif 'freepeople' in i:
                # freepeople: swap the thumbnail preset for the detail shot.
                img_link = 'https:' + i.replace('category$&amp;qlt=50&amp;fit=constrain', 'pdp-detail-shot$&hei=900&qlt=50&fit=constrain')
            elif any(token in i for token in passthrough):
                img_link = i
            else:
                continue  # unknown source — skipped silently, as before
            p.apply_async(self.download_urls, args=(img_link, j, c))
            j += 1
            # Fix: the original res.cloudinary.com branch alone omitted this
            # counter reset; now every source behaves consistently.
            if j == c:
                j = 1
        p.close()
        p.join()
        print('全部下载完成,共下载%s张图片' % str(j - 1))

if __name__ == '__main__':
    # Supported catalogue entry points; pick one below.
    target_urls = [
        'https://www.ae.com/aerie-bras/aerie/s-cat/4840012?cm=sUS-cUSD&navdetail=mega:cat6610030:c1:p2',
        'https://www.yse-paris.com/fr/53-e-shop',
        'https://www.freepeople.com/china',
        'http://valentinenyc.com/shop/',
    ]
    spider = Spider(target_urls[3])  # currently scraping valentinenyc
    print('-----------开始下载----------')
    image_urls = spider.parse()
    print('--------准备解析图片-----------')
    spider.run(image_urls)








