# coding:utf-8

'''
程序名：  Ferragamo自动抓取
功能：    根据"www.ferragamo.com"的URL地址，沿左侧导航栏获得分类URLs，再继续获得子分类的URLs，
         最后将所有子分类的显示全部开关打开，将该分类下的所有商品展示在一个页面中，抓取所有商品的"ID、名称、价格、品牌、分类、图片"。
语言：    python3.5.1
创建时间： 2016-2-23   修改于2016-2-23
作者:     苏勃
'''

import os
from time import sleep

from ferragamo.parsers import ProductDetailsParser, ProductUrlParser, CategoryUrlParser

from common.entities import Products, Product, Categories, Category
from common.utils import cat_l1_file_suffix, prod_file_suffix, working_dir, http_post, get_http_text_resp, \
    get_categories_having_prod_from_file, get_all_products_from_file, cstrtime, save_products_to_file, \
    save_categories_to_file, run_threads_in_queue, get_products_details, download_imgs

# 0.1 site_url: target locale — 'en'/'usa' uses the .com storefront;
# 'cn' switches to the dedicated China domain below.
language = 'en'
cc = 'usa'

site_url = 'http://www.ferragamo.com/shop/' + language + '/' + cc
if language == 'cn':
    site_url = 'http://www.ferragamo.cn'

# Endpoint and store parameters used by the category-search POST in
# get_products_urls(). The ids look like WebSphere Commerce parameters
# (storeId/catalogId/langId) — presumably fixed per storefront; verify
# against the live site if scraping a different locale.
search_on_cat_uri = '/fullSearchOnCategoryJSON'
storeId = '31150'
catalogId = '38051'
langId = '-1'

brand = 'ferragamo'

# 0.2 base_dir: per-brand/per-locale working directory for all output.
_base_dir = working_dir + '/' + brand + '_' + language + '_' + cc

# 0.3 category_file: file holding the scraped top-level category ids.
cat_l1_file = _base_dir + '/' + language + '_' + cc + cat_l1_file_suffix

# 0.4 product_url_file: file holding the scraped product URLs/details.
prod_file = _base_dir + '/' + language + '_' + cc + prod_file_suffix


# 1. Fetch and persist the top-level category ids.
def get_all_categories():
    """Scrape the site's navigation for top-level category ids.

    Saves the resulting Categories to cat_l1_file and returns it.
    Returns None when no category ids could be parsed from the page.
    """
    print("******* The top level categories urls *******")
    page = get_http_text_resp(site_url, '')
    cat_ids = CategoryUrlParser().findall_ids(page)
    if cat_ids is None:
        return None
    categories = Categories()
    for an_id in cat_ids:
        category = Category(an_id, None, None, cstrtime())
        category.has_child = False  # top-level ids are treated as leaves here
        categories.put_cat(category)
    save_categories_to_file(cat_l1_file, categories)
    print(categories.get_cat_ids())
    return categories


# 2. Collect the product URLs under every given category.
def get_products_urls(categories):
    """POST the category-search endpoint for each category and parse the
    product URLs out of the comment-wrapped JSON response.

    :param categories: Categories collection; each cat carries a cat_id.
    :return: a Products collection (possibly empty).
    """
    print("******* Get all products urls under the categories *******")
    products = Products()
    for cat in categories.get_cats():
        body = 'categoryId=' + cat.cat_id + '&storeId=' + storeId + '&catalogId=' + catalogId + '&langId=' + langId
        data = http_post(site_url, search_on_cat_uri, body)
        # The endpoint wraps its JSON payload in /* ... */ — strip the markers.
        data = data.replace('/*', '')
        data = data.replace('*/', '')
        p = ProductUrlParser.findall_products(data)
        if p is not None:
            products.putall(p)
        else:
            # BUG FIX: cat_url is None for categories built by
            # get_all_categories(), so the bare string concatenation
            # raised TypeError on this error path; str() makes it safe.
            print("!!!!!!!Can not find any product url under:\n" + str(cat.cat_url))
    return products


# 2.1 Load the saved categories and persist every product URL found.
def get_all_products_urls():
    """Read categories back from cat_l1_file, scrape their product URLs
    and write the result to prod_file (overwriting any previous run)."""
    print("******* Get all products urls *******")
    categories = get_categories_having_prod_from_file(cat_l1_file)
    save_products_to_file(prod_file, 'w', get_products_urls(categories))


# 3. Enrich each product with details scraped from its own page.
def __get_products_details(save_to_file, products):
    """Fetch every product's page, fill its details in place, append the
    batch to save_to_file, then rename the file by dropping its trailing
    character (presumably a work-in-progress marker — confirm against
    save_products_to_file's naming convention)."""
    parser = ProductDetailsParser()

    for prod in products.get_products():
        sleep(1)  # throttle requests to be polite to the server
        page = get_http_text_resp(site_url, prod.prod_url)

        prod.prod_name = parser.find_name(page)
        desc = parser.find_desc(page)
        desc = desc + '\nMade in ' + parser.find_madein(page)
        prod.prod_desc = desc
        prod.prod_price = parser.find_price(page)
        prod.prod_currency = parser.find_currency(page)
        prod.prod_colors = parser.findall_colors(page)
        prod.prod_sizes = parser.findall_sizes(page)
        prod.prod_img_zoomed_urls = parser.findall_zoomed_images(page)
        prod.prod_img_main_urls = parser.findall_main_images(page)

        print(prod.prod_id, prod.prod_name, prod.prod_url, prod.prod_cat,
              prod.prod_currency, prod.prod_price, prod.prod_colors,
              prod.prod_sizes, prod.prod_img_zoomed_urls,
              prod.prod_img_main_urls, prod.prod_desc)

    save_products_to_file(save_to_file, 'a', products)
    sleep(1)
    os.rename(save_to_file, save_to_file[:-1])


if __name__ == "__main__":
    start_time = cstrtime()
    # Create the working directory; an already-existing one is fine.
    try:
        os.mkdir(_base_dir)
    except FileExistsError as e:
        print(e)
    get_all_categories()
    get_all_products_urls()

    # BUG FIX: was the confusing no-op '[] * 0'; a plain empty list is meant.
    thread_array = []
    thread = get_products_details(prod_file, __get_products_details, 10)
    thread_array.append(thread)
    run_threads_in_queue(thread_array)

    prods = get_all_products_from_file(prod_file)
    download_imgs(brand + '_' + language, prods, '', 10)
    print('\nThe spider ran from ' + start_time + ' -- ' + cstrtime())
