# coding:utf-8

import csv
import os
import threading
import time
import urllib.error
import urllib.request
import urllib.parse

from time import sleep

from common.entities import Products, Product, Categories, Category
from common.htmlparser import HtmlParser

# All downloads, logs and data files live under <cwd>/downloads.
working_dir = os.getcwd() + '/downloads'
# Text encoding used for every CSV data file.
encoding = 'GBK'

# File-name suffixes for the per-site category-level CSVs and the product CSV.
cat_l1_file_suffix = '_cat_l1.csv'
cat_l2_file_suffix = '_cat_l2.csv'
cat_l3_file_suffix = '_cat_l3.csv'
cat_l4_file_suffix = '_cat_l4.csv'
prod_file_suffix = '_products.csv'

# Supported file types, keyed by magic-number prefix.
# Hex byte strings are used so the header length is explicit;
# signatures vary from 2 up to 8 bytes depending on the format.
filetypes = {
    b'\xff\xd8\xff': '.jpg',
    b'\x89\x50\x4E\x47': '.png',
    b'\x47\x49\x46\x38': '.gif',
    b'\x49\x49\x2A\x00': '.tif',
    b'\x42\x4D': '.bmp'}


# Determine a file extension from the magic-number header of binary data.
def filetype(data):
    """Return the extension (e.g. '.jpg') matching data's leading bytes,
    or 'unknown' when no known signature matches."""
    for magic, ext in filetypes.items():
        if data.startswith(magic):
            return ext
    return 'unknown'


# Http header user-agent: impersonate a desktop Chrome browser so the
# target site serves its normal pages.
user_agent = ('User-Agent',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) '
              'Chrome/45.0.2454.85 Safari/537.36')

# Content-Type header for form-encoded POST bodies (UTF-8).
content_type_x_www_form = ('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')


def cstrtime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime(time.time())
    return time.strftime('%Y-%m-%d %H:%M:%S', now)


def cstrtime_ymd():
    """Return the current local date formatted as 'YYYY-MM-DD'."""
    now = time.localtime(time.time())
    return time.strftime('%Y-%m-%d', now)


def err_log(module, level, url, code, msg):
    """Append one error record to <working_dir>/<module>.err.<date> and echo it.

    None values for url/code/msg are recorded as empty strings.
    The record is a comma-joined line: module, level, timestamp, url, code, msg.
    """
    if url is None:
        url = ''
    if code is None:
        code = ''
    if msg is None:
        msg = ''
    err_line = module + ',' + level + ',' + cstrtime() + ',' + url + ',' + str(code) + ',' + msg + '\n'
    print('ERROR: ' + err_line)
    # 'with' guarantees the handle is closed even if the write raises
    # (the old open/close pair leaked the handle on error).
    with open(working_dir + '/' + module + '.err.' + cstrtime_ymd(), 'a') as log_file:
        log_file.write(err_line)


def log(module, level, url, code, msg):
    """Append one info record to <working_dir>/<module>.out.<date> and echo it.

    None values for url/code/msg are recorded as empty strings
    (consistent with err_log; previously a None url raised TypeError here).
    """
    if url is None:
        url = ''
    if code is None:
        code = ''
    if msg is None:
        msg = ''
    line = module + ',' + level + ',' + cstrtime() + ',' + url + ',' + str(code) + ',' + msg + '\n'
    print('Logger: ' + line)
    # 'with' guarantees the handle is closed even if the write raises.
    with open(working_dir + '/' + module + '.out.' + cstrtime_ymd(), 'a') as log_file:
        log_file.write(line)


# Run threads strictly one at a time (queued execution).
def run_threads_in_queue(threads):
    """Start each thread only after the previous one has died, polling
    liveness every 30 seconds; finally block until all have finished."""
    if threads is None:
        print('The thread queue is None! Nothing to run!')
        return
    queue_size = len(threads)
    if queue_size == 0:
        print('The thread queue is Empty! Nothing to run!')
        return
    running_thread_index = 0
    # Start the first thread unless the caller already started it.
    if not threads[running_thread_index].is_alive():
        threads[running_thread_index].start()
    while running_thread_index < queue_size - 1:
        if threads[running_thread_index].is_alive():
            # Current thread still working: poll again in 30 s.
            sleep(30)
            continue
        else:
            # Current thread finished: advance and start the next one.
            running_thread_index += 1
            threads[running_thread_index].start()
    # Wait for the last started thread as well before returning.
    __wait_for_threads_finish(threads)


# Block until every thread in the list has finished.
def __wait_for_threads_finish(threads):
    """Wait for all threads to terminate; returns immediately for None.

    BUG FIX: the old polling loop decremented its remaining-count for every
    dead thread on every pass, so the count could reach zero and the function
    return while a later thread in the list was still alive. Thread.join()
    blocks until each thread has actually terminated.
    """
    if threads is None:
        print('The thread queue is None! Nothing to wait!')
        return
    for t in threads:
        t.join()


# Perform an HTTP POST and return the decoded text response.
def http_post(root_url, uri, data, retry_no=3):
    """POST `data` to root_url + uri (or to uri alone if it is absolute).

    `data` may be a dict (urlencoded here) or an already-encoded str.
    Returns the UTF-8-decoded body, or None on an empty response or an
    unrecoverable error. Transient HTTP/URL errors are retried up to
    `retry_no` times with a 2-second pause; 404 is never retried.
    """
    url = uri if uri.startswith('http') else root_url + uri
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [user_agent, content_type_x_www_form]
        # A str payload is assumed to be pre-encoded by the caller.
        if isinstance(data, str):
            postdata = data
        else:
            postdata = urllib.parse.urlencode(data)
        resp = opener.open(url, postdata.encode('utf-8')).read()
        print('Got the http response for ' + url)
        if len(resp) == 0:
            err_log('http', 'e', uri, 404, 'The http response is null')
            # BUG FIX: previously fell through and called decode(None, ...),
            # raising AttributeError that the broad except swallowed.
            return None
        return decode(resp, 'utf8')
    except urllib.error.HTTPError as e:
        err_log('http', 'e', e.filename, e.code, e.msg)
        if retry_no > 0 and e.code != 404:
            sleep(2)
            # BUG FIX: retry the POST itself. The old code retried via
            # get_http_raw_resp, turning the request into a GET (dropping
            # the payload) and returning undecoded bytes.
            return http_post(root_url, uri, data, retry_no - 1)
        return None
    except urllib.error.URLError as e:
        err_log('http', 'e', e.filename, e.errno, e.reason)
        if retry_no > 0:
            sleep(2)
            return http_post(root_url, uri, data, retry_no - 1)
        return None
    except Exception as e:
        err_log('http', 'e', url, -1, str(e))
        return None


# Perform an HTTP GET and return the raw response bytes.
def get_http_raw_resp(root_url, uri, retry_no=3):
    """GET root_url + uri (or uri alone if absolute) and return the body
    as bytes, or None on an empty response or unrecoverable error.
    Transient errors are retried up to `retry_no` times; 404 never is."""
    url = uri if uri.startswith('http') else root_url + uri
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [user_agent]
        body = opener.open(url).read()
        print('Got the http response for ' + url)
        if len(body) == 0:
            err_log('http', 'e', uri, 404, 'The http response is null')
            return None
        return body
    except urllib.error.HTTPError as e:
        err_log('http', 'e', e.filename, e.code, e.msg)
        # Guard clause: give up when out of retries or on a hard 404.
        if retry_no <= 0 or e.code == 404:
            return None
        sleep(2)
        return get_http_raw_resp(root_url, uri, retry_no - 1)
    except urllib.error.URLError as e:
        err_log('http', 'e', e.filename, e.errno, e.reason)
        if retry_no <= 0:
            return None
        sleep(2)
        return get_http_raw_resp(root_url, uri, retry_no - 1)
    except Exception as e:
        err_log('http', 'e', url, -1, str(e))


def get_http_text_resp(root_url, uri):
    """GET a URL and return its body decoded as UTF-8 text (None on failure)."""
    raw = get_http_raw_resp(root_url, uri)
    return decode(raw, 'utf8') if raw is not None else None


# Decode raw response bytes into text.
def decode(data, encode):
    """Return `data` decoded with codec `encode`; invalid bytes are dropped."""
    return data.decode(encode, errors='ignore')


# Read level-X categories from a CSV file.
def get_x_level_categories_from_file(file_name):
    """Load every Category row from `file_name` into a Categories collection.

    Row layout: id, name, url, father [, has_child [, updated]].
    """
    cats = Categories()
    # 'with' guarantees the file is closed even if a row fails to parse
    # (the old open/close pair leaked the handle on error).
    with open(file_name, 'r', encoding=encoding) as file_cat_lx:
        reader = csv.reader(file_cat_lx)
        for line in reader:
            if not line:
                # csv yields [] for blank lines; skip them instead of
                # crashing on line[0] (matches the sibling reader's guard).
                continue
            cat = Category(line[0], line[1], line[2])
            cat.father = line[3]
            if len(line) > 4:
                cat.has_child = line[4]
            if len(line) > 5:
                cat.updated = line[5]
            cats.put_cat(cat)
    return cats


# Read only the rows with has_child == False from a level-X category file.
def get_categories_having_prod_from_file(file_name):
    """Load the leaf categories (those without children) from `file_name`.

    Row layout: id, name, url, father [, has_child [, updated]].
    """
    cats = Categories()
    # 'with' guarantees the file is closed even if a row fails to parse.
    with open(file_name, 'r', encoding=encoding) as file_cat_lx:
        reader = csv.reader(file_cat_lx)
        for line in reader:
            if not line:
                # csv yields [] for blank lines.
                continue
            cat = Category(line[0], line[1], line[2])
            cat.father = line[3]
            if len(line) > 4:
                cat.has_child = line[4]
            if len(line) > 5:
                cat.updated = line[5]
            # Only categories without children hold products directly.
            if not cat.haschild():
                cats.put_cat(cat)
    return cats


# Read all product records from a product CSV file.
def get_all_products_from_file(prod_file):
    """Load every Product row from `prod_file` into a Products collection.

    Row layout: id, name, url, cat, then optionally currency, price,
    colors, sizes, zoomed image urls, main image urls, desc, feature,
    updated. List-valued columns were written as Python list reprs, so
    each "'...'"-quoted element is recovered with a regex rule.
    """
    products = Products()
    # Regex that pulls every single-quoted element out of a list repr.
    pattern = r'\'(.*?)\''
    parser = HtmlParser()
    csv_array_rule = 'csv array'
    parser.addrule(csv_array_rule, pattern)
    # 'with' guarantees the file is closed even if parsing raises
    # (the old open/close pair leaked the handle on error).
    with open(prod_file, 'r', encoding=encoding, newline='') as file_prod:
        reader = csv.reader(file_prod)
        for line in reader:
            if not line:
                # csv yields [] for blank lines; skip instead of crashing.
                continue
            prod = Product(line[0], line[1], line[2], line[3])
            if len(line) > 4:
                prod.prod_currency = line[4]
            if len(line) > 5:
                prod.prod_price = line[5]
            if len(line) > 6:
                prod.prod_colors = parser.findall(csv_array_rule, line[6])
            if len(line) > 7:
                prod.prod_sizes = parser.findall(csv_array_rule, line[7])
            if len(line) > 8:
                prod.prod_img_zoomed_urls = parser.findall(csv_array_rule, line[8])
            if len(line) > 9:
                prod.prod_img_main_urls = parser.findall(csv_array_rule, line[9])
            if len(line) > 10:
                prod.prod_desc = parser.findall(csv_array_rule, line[10])
            if len(line) > 11:
                prod.prod_feature = parser.findall(csv_array_rule, line[11])
            if len(line) > 12:
                prod.prod_updated = parser.findall(csv_array_rule, line[12])
            products.put(prod)
    return products


# 写文件
def save_file(file_name, data):
    f = open(file_name, 'wb')
    f.write(data)
    f.close()
    print(file_name + ' is saved.')


# Save category data to a CSV file (overwrites).
def save_categories_to_file(file_name, categories):
    """Write every Category in `categories` as one CSV row, stamped with
    the current timestamp in the last column."""
    # 'with' guarantees the handle is closed even if a write raises.
    with open(file_name, 'w', encoding=encoding, newline='') as file_cat:
        writer = csv.writer(file_cat)
        for cat in categories.get_cats():
            writer.writerow([cat.cat_id, cat.cat_name, cat.cat_url, cat.father, cat.has_child, cstrtime()])


# Save product data to a CSV file.
def save_products_to_file(file_name, mode, products):
    """Write every Product in `products` as one CSV row, stamped with the
    current timestamp in the last column.

    `mode` is the open() mode: 'w' to overwrite, 'a' to append.
    """
    # newline='' is required by csv.writer (and matches
    # save_categories_to_file); without it Windows output gets an extra
    # blank line after every row. 'with' also fixes the handle leak.
    with open(file_name, mode, encoding=encoding, newline='') as file_prod:
        writer = csv.writer(file_prod)
        for prod in products.get_products():
            writer.writerow([prod.prod_id, prod.prod_name, prod.prod_url, prod.prod_cat, prod.prod_currency,
                             prod.prod_price, prod.prod_colors, prod.prod_sizes, prod.prod_img_zoomed_urls,
                             prod.prod_img_main_urls, prod.prod_desc, prod.prod_feature, cstrtime()])


# Partition products into group_no groups.
def __group_products(products, group_no):
    """Split `products` into `group_no` contiguous Products groups of
    near-equal size.

    BUG FIX: the old code sized every group as int(size / group_no), so the
    final size % group_no products were assigned to no group and silently
    dropped. divmod now spreads the remainder over the leading groups, so
    every product lands in exactly one group.
    """
    items = list(products.get_products())
    size = len(items)
    base, extra = divmod(size, group_no)
    groups = [Products() for _ in range(group_no)]
    start = 0
    for i in range(group_no):
        # The first `extra` groups take one additional product each.
        count = base + (1 if i < extra else 0)
        for prod in items[start:start + count]:
            groups[i].put(prod)
        start += count
    return groups


# Merge several product CSV part-files into a single output file.
def __combine_product_details_files(file_names, file_name):
    """Read every part-file in `file_names` and write all products into
    one combined file at `file_name` (overwriting it)."""
    merged = Products()
    for part in file_names:
        for product in get_all_products_from_file(part).get_products():
            merged.put(product)
    save_products_to_file(file_name, 'w', merged)


# Merge the grouped product-detail part-files; blocks on the worker threads.
def __combine_products_details(threads, file_names, file_name):
    """Wait for all worker threads, then combine their part-files into
    `file_name` and report the elapsed wall-clock span."""
    starttime = cstrtime()
    __wait_for_threads_finish(threads)
    # Strip the trailing '_' marker from each part-file name (in place,
    # as the workers save their output under the un-suffixed name).
    for i, part_name in enumerate(file_names):
        file_names[i] = part_name[:-1]
    __combine_product_details_files(file_names, file_name)
    endtime = cstrtime()
    print('\nCombine products details ran from ' + starttime + ' -- ' + endtime)


# Fetch the detail information for every product, in parallel.
def get_products_details(prod_file, func_get_products_details, thread_no=4):
    """Split the products in `prod_file` into `thread_no` groups, run
    `func_get_products_details(part_file_name, group)` in one thread per
    group, and start a background thread that merges the part-files back
    into `prod_file` once the workers finish. Returns the merge thread."""
    print("******* All products details *******")
    all_products = get_all_products_from_file(prod_file)
    products_groups = __group_products(all_products, thread_no)

    file_names = [prod_file + '.part' + str(i) + '_' for i in range(thread_no)]
    threads = [
        threading.Thread(target=func_get_products_details,
                         args=(file_names[i], products_groups[i]))
        for i in range(thread_no)
    ]

    # Stagger worker start-ups by 3 s to spread out the initial requests.
    for worker in threads:
        worker.start()
        sleep(3)

    combiner = threading.Thread(target=__combine_products_details,
                                args=(threads, file_names, prod_file))
    combiner.start()
    return combiner


# Download product images with thread_no parallel workers.
def download_imgs(site_id, products, base_url, thread_no=4):
    """Split `products` into thread_no groups and download each group's
    images in a worker thread; blocks until all workers finish.

    Files land under <working_dir>/<site_id>/images/<prod_id>/.
    Returns the list of worker threads (all finished on return).
    """
    starttime = cstrtime()
    products_by_groups = __group_products(products, thread_no)

    working_path = working_dir + '/' + site_id + '/images'
    try:
        os.mkdir(working_path)
    except FileExistsError as e:
        # Directory left over from an earlier run: not an error.
        print(e)
    os.chdir(working_path)
    # NOTE(review): os.chdir changes the *process-wide* cwd while worker
    # threads run (and __download_imgs chdirs again per product) — confirm
    # nothing else in the process depends on the cwd concurrently.

    threads = [] * 0
    for i in range(0, thread_no):
        t = threading.Thread(target=__download_imgs, args=(working_path, site_id, products_by_groups[i], base_url))
        threads.append(t)

    # Stagger worker start-ups by one second to spread out requests.
    for t in threads:
        t.start()
        sleep(1)

    __wait_for_threads_finish(threads)
    endtime = cstrtime()
    print('\nDownload images ran from ' + starttime + ' -- ' + endtime)
    return threads


def __download_imgs(working_path, site_id, products, base_url):
    """Worker: download the zoomed and main images of every product in
    `products` into <working_path>/<prod_id>/.

    File names follow <site_id>_<prod_id>_<tag>_<n><ext>, where the
    extension is sniffed from the image's magic-number header.
    """
    def _save_image_set(prod_id, img_urls, tag):
        # Fetch each image, sniff its type, and save it; skip failed fetches.
        for i, img_url in enumerate(img_urls):
            data = get_http_raw_resp(base_url, img_url)
            if data is None:
                continue
            ext = filetype(data)
            save_file(working_path + '/' + prod_id + '/' + site_id + '_' +
                      prod_id + '_' + tag + '_' + str(i + 1) + ext, data)

    for prod in products.get_products():
        # Throttle: one-second pause between products.
        sleep(1)
        os.chdir(working_path)
        try:
            os.mkdir(prod.prod_id)
        except FileExistsError as e:
            # Directory left over from an earlier run: not an error.
            print(e)
        # The two image sets only differed in their file-name tag, so the
        # duplicated download loops are folded into one helper.
        _save_image_set(prod.prod_id, prod.prod_img_zoomed_urls, 'zoomed')
        _save_image_set(prod.prod_id, prod.prod_img_main_urls, 'main')
