# coding:utf-8

'''
程序名：  Burberry自动抓取
功能：    根据"cn.burberry"的URL地址，沿左侧导航栏获得分类URLs，再继续获得子分类的URLs，
         最后将所有子分类的"显示全部"开关打开，将该分类下的所有商品展示在一个页面中，抓取所有商品的"ID、名称、价格、品牌、分类、图片"。
语言：    python3.5.1
创建时间： 2016-2-16   修改于2016-2-16
作者:     苏勃
'''

import os
from time import sleep

from burberry.parsers import ProductDetailsParser, ProductUrlParser, CategoryUrlParser, \
    L1CategoryUrlParser, L2CategoryUrlParser, L3CategoryUrlParser

from common.entities import Category, Categories, Products
from common.utils import working_dir, cat_l1_file_suffix, cat_l2_file_suffix, cat_l3_file_suffix, cat_l4_file_suffix, \
    prod_file_suffix, get_http_text_resp, get_x_level_categories_from_file, \
    get_categories_having_prod_from_file, get_all_products_from_file, cstrtime, save_products_to_file, \
    save_categories_to_file, get_products_details, run_threads_in_queue, download_imgs

# 0.1 site_url -- country prefix selecting the regional storefront
country_code = "cn"


def _site_host():
    """Return the storefront host name, e.g. ``cn.burberry.com``."""
    return "%s.burberry.com" % country_code
site_url = "https://%s" % _site_host()

# 0.2 base_dir -- per-site working directory for every output file
_base_dir = "%s/%s" % (working_dir, _site_host())

# 0.3 category_file -- one data file per category level (1-4)
cat_l1_file = "%s/%s%s" % (_base_dir, _site_host(), cat_l1_file_suffix)
cat_l2_file = "%s/%s%s" % (_base_dir, _site_host(), cat_l2_file_suffix)
cat_l3_file = "%s/%s%s" % (_base_dir, _site_host(), cat_l3_file_suffix)
cat_l4_file = "%s/%s%s" % (_base_dir, _site_host(), cat_l4_file_suffix)

# 0.4 product_url_file -- product URL list written by get_all_products_urls()
prod_file = "%s/%s%s" % (_base_dir, _site_host(), prod_file_suffix)


# 1. Fetch the first-level category data from the home page.
def get_top_level_categories():
    """Scrape the top-level category links from the site's home page.

    Saves the parsed categories to ``cat_l1_file`` and returns them as a
    ``Categories`` container.  The category URL doubles as its id.
    """
    print("******* The top level categories urls *******")
    data = get_http_text_resp(site_url, site_url)
    cats = Categories()
    parser = L1CategoryUrlParser()
    top_urls = parser.findall_urls(data)
    top_titles = parser.findall_titles(data)
    # Pair each URL with its title instead of index-looping over both lists.
    for url, title in zip(top_urls, top_titles):
        cat = Category(url, title, url, cstrtime())
        cat.father = None  # top-level categories have no parent
        cats.put_cat(cat)
    save_categories_to_file(cat_l1_file, cats)
    print(cats.get_cat_ids())
    return cats


# 2. Fetch the second-level category data under each top-level category.
def get_second_level_categories(top_level_cats):
    """Scrape second-level categories from each top-level category page.

    Marks each top-level category's ``has_child`` flag, saves the new
    level to ``cat_l2_file``, re-saves the (updated) level-1 data, and
    returns the second-level ``Categories``.
    """
    print("******* The 2nd level categories urls *******")
    cats = Categories()
    parser = L2CategoryUrlParser()
    for top_cat in top_level_cats.get_cats():
        data = get_http_text_resp(site_url, top_cat.cat_url)
        sub_levels_urls = parser.findall_urls(data)
        sub_levels_titles = parser.findall_titles(data)
        if sub_levels_urls is not None:
            top_cat.has_child = True
            print(sub_levels_urls)
            # zip pairs each URL with its title (replaces an index loop).
            for sub_url, sub_title in zip(sub_levels_urls, sub_levels_titles):
                cat = Category(sub_url, sub_title, sub_url, cstrtime())
                cat.father = top_cat.cat_id
                cats.put_cat(cat)
        else:
            top_cat.has_child = False
            print("Can not find the 2nd level categories")
            print("The url is " + top_cat.cat_url)
    save_categories_to_file(cat_l2_file, cats)
    # Persist the parents again so their has_child flags are recorded.
    save_categories_to_file(cat_l1_file, top_level_cats)
    return cats


# 3. Fetch the third-level category URLs under each second-level category.
def get_third_level_categories(up_level_cats):
    """Scrape third-level categories from each second-level category page.

    Marks each second-level category's ``has_child`` flag, saves the new
    level to ``cat_l3_file``, re-saves the (updated) level-2 data, and
    returns the third-level ``Categories``.
    """
    print("******* The 3rd level categories urls *******")
    cats = Categories()
    parser = L3CategoryUrlParser()
    for l2_cat in up_level_cats.get_cats():
        data = get_http_text_resp(site_url, l2_cat.cat_url)
        sub_levels_urls = parser.findall_urls(data)
        sub_levels_titles = parser.findall_titles(data)
        if sub_levels_urls is None:  # no 3rd level: the 2nd level is a leaf
            l2_cat.has_child = False
            print("Can not find the 3rd level categories")
            print("The url is " + l2_cat.cat_url)
        else:
            l2_cat.has_child = True
            # zip pairs each URL with its title (replaces an index loop).
            for sub_url, sub_title in zip(sub_levels_urls, sub_levels_titles):
                cat = Category(sub_url, sub_title, sub_url, cstrtime())
                cat.father = l2_cat.cat_id
                cats.put_cat(cat)
            print(sub_levels_urls)
    save_categories_to_file(cat_l3_file, cats)
    # Persist the parents again so their has_child flags are recorded.
    save_categories_to_file(cat_l2_file, up_level_cats)
    return cats


# 4. Fetch the final (leaf) category data from every known category page.
def get_all_categories(top_level_cats, second_level_cats, third_level_cats):
    """Scan every category page of levels 1-3 for final leaf categories.

    Pages that fail to download are skipped.  The leaves are saved to
    ``cat_l4_file``, the level-3 data is re-saved, and the leaf
    ``Categories`` container is returned.
    """
    print("******* All categories urls *******")
    result = Categories()
    parser = CategoryUrlParser()

    # Merge all three levels so a single pass covers every parent page.
    merged = Categories()
    for level in (top_level_cats, second_level_cats, third_level_cats):
        merged.putall(level)

    for parent in merged.get_cats():
        page = get_http_text_resp(site_url, parent.cat_url)
        if page is None:
            continue  # download failed; move on to the next page
        for leaf in parser.findall_cats(page, parent):
            result.put_cat(leaf)
    save_categories_to_file(cat_l4_file, result)
    save_categories_to_file(cat_l3_file, third_level_cats)
    return result


# 5. Collect the product URLs listed under the given categories.
def get_products_urls(categories):
    """Scrape the product URLs from every category page in *categories*.

    Pages that fail to download are skipped; pages with no recognisable
    product URLs are reported.  Returns a ``Products`` container.
    """
    print("******* Get all products urls under the categories *******")
    found = Products()
    url_parser = ProductUrlParser()
    for category in categories.get_cats():
        page = get_http_text_resp(site_url, category.cat_url)
        if page is None:
            continue  # download failed; move on to the next category
        parsed = url_parser.findall_product_urls(page)
        if parsed is None:
            print("!!!!!!!Can not find any product url under:\n" + category.cat_url)
        else:
            found.putall(parsed)
    return found


# 6. Gather the product URLs of every category level into one file.
def get_all_products_urls():
    """Collect product URLs from the level 1-4 category files and write
    them all to ``prod_file`` (mode ``'w'`` overwrites previous content).
    """
    print("******* Get all products urls *******")
    products = Products()
    # All four level files share the same format, so process them uniformly
    # instead of repeating the same two lines per level.
    for cat_file in (cat_l1_file, cat_l2_file, cat_l3_file, cat_l4_file):
        cats = get_categories_having_prod_from_file(cat_file)
        products.putall(get_products_urls(cats))
    save_products_to_file(prod_file, 'w', products)


# 7. Fetch the detail-page information of every product.
def __get_products_details(save_to_file, products):
    """Fill in the detail fields of every product in *products* and append
    them to *save_to_file*.

    *save_to_file* is treated as a temporary name: after writing, its last
    character is stripped and the file is renamed to the final name,
    marking the batch as complete.
    """
    parser = ProductDetailsParser()

    for prod in products.get_products():
        data = get_http_text_resp(site_url, prod.prod_url)
        product_id = prod.prod_id
        product_name = prod.prod_name
        cat_id = prod.prod_cat

        # Pricing and variant information.
        prod.prod_currency = currency = parser.find_currency(data)
        prod.prod_price = price = parser.find_price(data)
        prod.prod_colors = colors = parser.findall_colors(data)
        prod.prod_sizes = sizes = parser.findall_sizes(data)

        # Descriptive text and image URLs.
        prod.prod_desc = product_desc = parser.findall_desc(data)
        prod.prod_feature = product_feature = parser.findall_features(data)
        prod.prod_img_zoomed_urls = images_zoomed = parser.findall_zoomed_images(data)
        prod.prod_img_main_urls = images_main = parser.findall_main_images(data)
        print(product_id, product_name, prod.prod_url, cat_id, currency, price, colors, sizes, images_zoomed,
              images_main, product_desc, product_feature)

    save_products_to_file(save_to_file, 'a', products)
    sleep(1)  # give the writer a moment to finish before renaming
    # Idiomatic slice instead of save_to_file[0: len(save_to_file) - 1]:
    # drop the trailing marker character to obtain the final file name.
    os.rename(save_to_file, save_to_file[:-1])


if __name__ == "__main__":
    start_time = cstrtime()
    # Create the output directories; an already-existing directory is
    # reported but not fatal (same behavior as before, without duplication).
    for directory in (working_dir, _base_dir):
        try:
            os.mkdir(directory)
        except FileExistsError as e:
            print(e)

    # Crawl the category tree level by level; each level is persisted to
    # file and reloaded so later stages work from the saved data.
    get_top_level_categories()
    top_cats = get_x_level_categories_from_file(cat_l1_file)
    get_second_level_categories(top_cats)
    second_cats = get_x_level_categories_from_file(cat_l2_file)
    get_third_level_categories(second_cats)
    third_cats = get_x_level_categories_from_file(cat_l3_file)
    get_all_categories(top_cats, second_cats, third_cats)

    get_all_products_urls()
    # Fetch the product details with 8 worker threads.
    thread = get_products_details(prod_file, __get_products_details, 8)
    # Bug fix: `thread_array = [] * 0` was a meaningless expression (it is
    # always just `[]`); build the one-element list directly.
    run_threads_in_queue([thread])
    prods = get_all_products_from_file(prod_file)
    download_imgs(_site_host(), prods, site_url, 8)
    print('\nThe spider ran from ' + start_time + ' -- ' + cstrtime())
