# coding=utf-8


import os

from coach.parsers import L1CategoryUrlParser,L2CategoryUrlParser

from common.entities import Category, Categories, Products
from common.utils import working_dir, cat_l1_file_suffix, cat_l2_file_suffix, cat_l3_file_suffix, cat_l4_file_suffix, \
    prod_file_suffix, get_http_text_resp, get_x_level_categories_from_file, \
    get_categories_having_prod_from_file, get_all_products_from_file, cstrtime, save_products_to_file, \
    save_categories_to_file, get_products_details, run_threads_in_queue, download_imgs


'''
程序名：  Coach自动抓取
功能：    根据"china.coach"的URL地址，沿左侧导航栏获得分类URLs，再继续获得子分类的URLs，
          最后将所有子分类的显示全部开关打开，将该分类下的所有商品展示在一个页面中，抓取所有商品的"ID、名称、价格、品牌、分类、图片"。
语言：    python3.5.1
创建时间：2016-3-11
作者:     史正强
'''

# 0.1 site_url
def _site_host():
    return "china.coach.com"
site_url = "http://" + _site_host()

# 0.2 Working directory for this site's scraped data (one dir per host).
_base_dir = "%s/%s" % (working_dir, _site_host())

# Common "<dir>/<host>" stem shared by every output file below.
_file_stem = _base_dir + '/' + _site_host()

# 0.3 Category files, one per navigation level (1-4).
cat_l1_file = _file_stem + cat_l1_file_suffix
cat_l2_file = _file_stem + cat_l2_file_suffix
cat_l3_file = _file_stem + cat_l3_file_suffix
cat_l4_file = _file_stem + cat_l4_file_suffix

# 0.4 Product listing file.
prod_file = _file_stem + prod_file_suffix

# 1. Build the top-level (L1) category data
def get_top_level_categories():
    """Build the two hard-coded top-level categories (women / men),
    persist them to ``cat_l1_file`` and return them.

    Returns:
        Categories: collection holding one ``Category`` per top-level
        navigation entry, each with ``father`` set to ``None``.
    """
    print("******* The top level categories urls *******")
    # FIX: the original fetched the home page here (get_http_text_resp)
    # but never used the response — the dead network request was removed.
    # The top-level entries are fixed navigation URLs, not parsed from HTML.
    top_entries = [
        ("/static/women.htm", "女士商品"),
        ("/static/men.htm", "男士商品"),
    ]
    cats = Categories()
    for url, title in top_entries:
        # The URL doubles as the category id (first ctor argument).
        cat = Category(url, title, url, cstrtime())
        cat.father = None  # L1 categories have no parent
        cats.put_cat(cat)
    save_categories_to_file(cat_l1_file, cats)
    print(cats.get_cat_ids())
    return cats

# 2. Fetch the second-level (L2) category data
def get_second_level_categories(top_level_cats):
    """Download each top-level category page and print the L2 category
    URLs and titles extracted by ``L2CategoryUrlParser``.

    NOTE(review): populating and saving the result is still disabled
    (work in progress) — the function currently only prints what the
    parser finds and returns an empty ``Categories`` collection.

    Args:
        top_level_cats: ``Categories`` produced by
            ``get_top_level_categories``.
    Returns:
        Categories: currently always empty (see note above).
    """
    print("******* The 2nd level categories urls *******")
    result = Categories()
    url_parser = L2CategoryUrlParser()
    for parent in top_level_cats.get_cats():
        page = get_http_text_resp(site_url, parent.cat_url)
        print(url_parser.findall_urls(page))
        print(url_parser.findall_titles(page))
    # TODO: turn the parsed urls/titles into Category objects with
    # father = parent.cat_id, set parent.has_child accordingly, then save
    # both levels via save_categories_to_file(cat_l2_file / cat_l1_file).
    return result


if __name__ == "__main__":
    # NOTE(review): start_time is recorded but never reported — TODO print
    # the elapsed time when the run finishes.
    start_time = cstrtime()

    # FIX: os.makedirs(..., exist_ok=True) creates working_dir and _base_dir
    # in one call and silently tolerates re-runs, replacing the duplicated
    # try/os.mkdir/except-FileExistsError blocks of the original.
    os.makedirs(_base_dir, exist_ok=True)

    top_cats = get_top_level_categories()
    get_second_level_categories(top_cats)