# -*- coding: utf-8 -*-
# author : li shi jie
# Email : yr785339493@qq.com
import requests
import urllib3
import re
import os
urllib3.disable_warnings()
from multiprocessing import Pool
from collections import Counter
class Spider_yse:
    """Captcha-gated image crawler.

    Workflow: obtain a captcha session token, save the captcha image locally
    so an operator can read it, submit the typed answer to unlock the target
    site, then walk product-listing pages 1-8 and download every product
    image.

    NOTE(review): every URL embeds hard-coded session/challenge tokens that
    were valid at capture time (2018); they almost certainly need refreshing
    before this crawler works again.
    """

    def __init__(self):
        # Browser-like headers so the target site serves normal HTML.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        }
        # Endpoint that issues a captcha session token.
        self.code_url = 'https://captcha.su.baidu.com/session_cb/?pub=377e4907e1a3b419708dbd00df9e8f79&callback=callback'
        # Captcha-check endpoint (challenge fields are session-specific).
        self.base_url = 'https://www.hankypanky.com/cdn-cgi/l/chk_captcha?id=47d950b38aa72846&captcha_challenge_field=d294399398216ec60ecb968a168987b9_MTU0Mjg2ODM3Mi41NDQ%3D_10.26.88.42&manual_captcha_challenge_field=budpq'

    def parse(self, out_dir='C:/Users/1/Desktop/project/ms/app/crawlapp/img/2018-11-22'):
        """Solve the captcha interactively, then crawl pages 1-8 of the
        product listing and save each product image to *out_dir*.

        :param out_dir: directory the images are written to; created if
                        missing. Defaults to the original hard-coded path
                        for backward compatibility.
        """
        # 1) Obtain a captcha session token from the captcha service.
        session_resp = requests.get(self.code_url, headers=self.headers, verify=False)
        sessionstr = re.compile(r'"sessionstr":"(.+?)"').findall(session_resp.text)[0]
        # 2) Save the captcha image so the operator can read it.
        code_image_url = 'https://captcha.su.baidu.com/image/?session={}&pub=377e4907e1a3b419708dbd00df9e8f79'.format(sessionstr)
        code_image = requests.get(code_image_url, headers=self.headers, verify=False)
        with open('code.png', 'wb') as f:
            f.write(code_image.content)
        # 3) Submit the manually typed captcha to unlock the site session.
        res_url = 'https://www.hankypanky.com/cdn-cgi/l/chk_captcha?id=47d98f101f7b283a&captcha_challenge_field=9c7ea619dad6256ca797ab012663d6c1_MTU0Mjg3MDQ0My43NTM%3D_10.58.94.21&manual_captcha_challenge_field=' + input('验证码: ')
        s = requests.session()
        # Response body is irrelevant; the GET just binds the captcha answer
        # to the session's cookies.
        s.get(res_url, headers=self.headers, verify=False)
        # Create the target directory up front — the original open() call
        # raised FileNotFoundError whenever it was missing.
        os.makedirs(out_dir, exist_ok=True)
        counter = 1  # running file index across all pages
        for page in range(1, 9):
            page_url = 'https://www.hankypanky.com/bras-and-tops.html?p=%s' % page
            print(page_url)
            response = s.get(page_url, headers=self.headers, verify=False)
            listing = re.compile(r'category-products">(.+?)<div class="toolbar-bottom">', re.S).findall(response.text)
            thumbs = re.compile(r'<div class="product-image-wrapper">.+?<img src="(.+?)".+?>', re.S).findall(listing[0])
            # Swap the 240x240 thumbnail path segment for the 600x600
            # full-size one.
            full_urls = [u.replace('small_image/240x240/17f82f742ffe127f42dca9de82fb58b1', 'image/600x600/9df78eab33525d08d6e5fb8d27136e95') for u in thumbs]
            for img_url in full_urls:
                img_resp = s.get(img_url, headers=self.headers, verify=False)
                print(counter)
                with open(os.path.join(out_dir, str(counter) + '.png'), 'wb') as f:
                    f.write(img_resp.content)
                counter += 1

    def down_load(self, url, page):
        """Download *url* into ``yse/<page>.png``.

        :param url: image URL to fetch
        :param page: integer used as the output file stem
        """
        # exist_ok avoids the race-prone exists()/mkdir() pair.
        os.makedirs('yse', exist_ok=True)
        r = requests.get(url)
        print('正在下载第%s张' % page)
        with open('yse/' + str(page) + '.png', 'wb') as f:
            f.write(r.content)

if __name__ == '__main__':
    # Entry point: run the interactive captcha + image-download crawl.
    # NOTE: parse() returns None, so the original `res_sub = spider.parse()`
    # assignment (and the dead commented-out Pool code that consumed it)
    # has been removed.
    spider = Spider_yse()
    spider.parse()


















