# -*- coding: utf-8 -*- 
# @Time : 2021/4/7 9:02 
# @Author : Dong 
# @File : tokyo.py
import os
import re
from multiprocessing import Pool

from pyquery import PyQuery as pq
from requests.exceptions import HTTPError

# Root directory where every scraped gallery folder is created (Windows path).
_workspace = 'G:\\tokyo_fashion'

import requests

# No need to go to any trouble - there are two direct ways:
# session = requests.Session()
# session.trust_env = False
# response = session.get('http://ff2.pw')
# Or:
# proxies = { "http": None, "https": None}
# requests.get("http://ff2.pw", proxies=proxies)
# Either one bypasses the system-configured proxy.

# Desktop-Chrome User-Agent so the site serves its normal HTML pages.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}


def get_index(base_url, retries=3):
    """Fetch the HTML body of *base_url*.

    Bug fixed: the original placed ``requests.get`` OUTSIDE the ``try``,
    so the network errors the handler was meant to catch propagated, and
    the recursive retry had no depth bound and discarded its own result.

    :param base_url: URL of the index page to download.
    :param retries: how many attempts to make before giving up.
    :return: the response text on HTTP 200, otherwise ``None``.
    """
    for _ in range(retries):
        try:
            response = requests.get(base_url, headers=headers)
        except requests.RequestException:
            # Transient network failure - try again (bounded, unlike the
            # original unbounded recursion).
            continue
        if response.status_code == 200:
            return response.text
        return None
    return None

def parse_one_page(html):
    """Extract the product entries from one index page and dispatch them.

    Bug fixed: the original caught ``HTTPError`` — which pyquery parsing
    never raises — and retried by recursing on the SAME ``html`` forever;
    it also crashed when ``get_index`` returned ``None``.

    :param html: raw HTML of an index page, or ``None`` on fetch failure.
    :return: the list of product dicts (also passed to ``mkdir_file``).
    """
    if not html:
        # get_index returns None on non-200 / repeated failure.
        return []
    doc = pq(html)
    # Keys 'src_ip'/'discrption' (sic) are kept as-is: mkdir_file reads them.
    product_list = [
        {
            'src_ip': item.find('.photo').attr('href'),
            'title': item.find('.news-ititle').text(),
            'discrption': item.find('.news-desc').text(),
        }
        for item in doc('.articles-list .news-block').items()
    ]
    mkdir_file(product_list)
    return product_list


def save_photo(photo_url, path_title):
    """Download every gallery image linked from *photo_url* into *path_title*.

    Images are saved as ``0.jpg``, ``1.jpg``, ... (counter advances only on
    a successful save). Individual image failures are printed and skipped —
    the download is deliberately best-effort.

    Fixes: the bare ``except:`` that silently swallowed the ``TypeError``
    from ``re.findall(pattern, None)`` is replaced by an explicit ``None``
    check, and the loop-invariant regex is compiled once, outside the loop.

    :param photo_url: URL of the gallery detail page.
    :param path_title: existing directory to write the .jpg files into.
    """
    response = requests.get(photo_url, headers=headers)
    doc2 = pq(response.text)
    # Pull the ~1000px-wide variant out of the <img srcset="..."> attribute.
    pattern = re.compile('768w,(.*?) 1000w', re.S)
    num = 0
    for d_item in doc2('.gallery .gallery-row').items():
        det = d_item('.portrait img').attr('srcset')
        if det is None:
            # Row without a portrait image - nothing to extract.
            continue
        for src in pattern.findall(det):
            try:
                src = src.strip()
                photo_path = os.path.join(path_title, '%s.jpg' % num)
                r = requests.get(src)
                with open(photo_path, 'wb') as f:
                    f.write(r.content)
                    print(photo_path + ' 文件保存成功')
                num += 1
            except Exception as e:  # best-effort: log this image, keep going
                print(e)
                continue

def save_description(text, photo_path):
    """Write *text* to ``description.txt`` inside the directory *photo_path*.

    :param text: description string to persist (written as UTF-8).
    :param photo_path: directory in which the file is created/overwritten.
    """
    target = os.path.join(photo_path, 'description.txt')
    with open(target, 'w', encoding='utf-8') as handle:
        handle.write(text)


def mkdir_file(product_list):
    """Create one directory per product and save its photos and description.

    Fixes: the split-then-concatenate loop that removed ``'w/'`` from the
    title was exactly ``str.replace('w/', '')``; the ``exists``/``mkdir``
    pair was a TOCTOU race and is replaced by ``os.makedirs(exist_ok=True)``
    (which also creates the workspace root if missing).

    :param product_list: dicts with keys ``'src_ip'`` (gallery URL),
        ``'title'`` and ``'discrption'`` (sic — key name produced by
        ``parse_one_page``, kept for compatibility).
    """
    for item in product_list:
        # Strip the 'w/' token so the title is usable as a directory name.
        title = item.get('title').replace('w/', '')
        path_title = os.path.join(_workspace, title)
        os.makedirs(path_title, exist_ok=True)
        save_photo(item.get('src_ip'), path_title)
        save_description(item.get('discrption'), path_title)



def main(num):
    """Scrape one index page of the street-snaps category.

    :param num: 1-based page number to append to the category URL.
    """
    base_url = 'https://tokyofashion.com/category/tokyo-street-snaps/page/'
    parse_one_page(get_index(base_url + str(num)))

# TODO: save the images

if __name__ == '__main__':
    # https://tokyofashion.com/category/tokyo-street-snaps/page/2/
    # Fix: the Pool was never closed/joined, so the parent could exit
    # before the workers finished; the context manager handles teardown.
    # pool.map accepts range() directly - no list comprehension needed.
    with Pool() as pool:
        pool.map(main, range(90, 101))









'''下载图片'''
# img = requests.get(url)
#
# file_name = name + '.jpg'
#
# print('开始保存图片')
#
# f = open(file_name, 'ab')
#
# f.write(img.content)
#
# print(file_name, '图片保存成功！')
#
# f.close()