#!/usr/bin/env python
# -*- coding:utf-8 -*-

__author__ = 'Nova'
# modified:yanglk2010@sina.com 2017-08-08 update 2.x to 3.x

import tileslicer
import os
from urllib.request import urlopen
import platform
import multiprocessing
import threading
import re;

CURR_DIR = os.path.abspath(".");
TILES_DIR = os.path.join(CURR_DIR, 'tiles')
#TILES_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tiles')
# HOSTS = [
# "http://online0.map.bdimg.com/tile/",
# "http://online1.map.bdimg.com/tile/",
# "http://online2.map.bdimg.com/tile/",
# "http://online3.map.bdimg.com/tile/",
# "http://online4.map.bdimg.com/tile/"
# ]
HOSTS = [
"http://online0.map.bdimg.com/onlinelabel/",
"http://online1.map.bdimg.com/onlinelabel/",
"http://online2.map.bdimg.com/onlinelabel/",
"http://online3.map.bdimg.com/onlinelabel/",
"http://online4.map.bdimg.com/onlinelabel/"
]
DEFAULT_PAGE_SIZE = 20


def get_tile_url(host, x, y, z):
    """Build the Baidu tile URL for tile (x, y) at zoom z on the given host.

    Baidu UDT was updated to 20170803; p=1 requests labelled tiles, p=0
    requests unlabelled ones.
    """
    query = '?qt=tile&styles=pl&udt=20170803&scaler=1&p=1&x=%d&y=%d&z=%d' % (x, y, z)
    return host + query


def get_save_path(x, y, z):
    """Return the local path TILES_DIR/<z>/<x>/<y>.png, creating directories.

    os.makedirs(..., exist_ok=True) replaces the original's two
    check-then-create pairs, which were race-prone when several threads or
    processes built the same directory concurrently; the block of `del`
    statements on throwaway locals was dead weight and is gone.
    """
    tile_dir = os.path.join(TILES_DIR, str(z), str(x))
    os.makedirs(tile_dir, exist_ok=True)
    return os.path.join(tile_dir, '%d.png' % y)


def fetch_process(start, end, host):
    """Split the tile-index range [start, end) across worker threads and wait.

    Args:
        start, end: tile-index range this process is responsible for.
        host: tile-server base URL, passed through to every thread.
    """
    thread_count = 5
    threads = []

    # Integer ceiling division. The original used '/', which in Python 3
    # yields a float, so float page boundaries leaked into the tile indices
    # handed to fetch_thread. Guarantee a minimum page size as before.
    page_size = -(-(end - start) // thread_count)
    if page_size <= 0:
        page_size = DEFAULT_PAGE_SIZE
    page_start = start

    for _ in range(thread_count):
        page_end = min(page_start + page_size, end)
        if page_start < page_end:
            t = threading.Thread(target=fetch_thread, args=(page_start, page_end, host))
            t.start()
            threads.append(t)
        page_start += page_size

    # Block until every worker finishes so the process exits cleanly.
    for t in threads:
        t.join()

# 下载对象
def download_pic(url, save_path):
    f_in = urlopen(url, timeout=10)
    with open(save_path, 'wb') as f_out:
        f_out.write(f_in.read())
    print("download finished: %s "%url);
    del f_in

# 处理未成功下载的tile
def deal_error(filename):
    if not os.path.exists(filename):
        print("%s not exist"%filename);
        return;
    try:
        with open(filename, 'r') as error:
            for line in error.readlines():
                xyz = urlParse(line.strip());
                download_pic(line.strip(), get_save_path(xyz[0], xyz[1], xyz[2]));
    except Exception as e:
        print( 'failed to download: %s\nerror: %s' % (url, e))

#从URL中获取x, y, z变量
def urlParse(url):
    x = re.search("x=[1-9]{,3}", url).group()[2:];
    y = re.search("y=[1-9]{,3}", url).group()[2:];
    z = re.search("z=[1-9]{,3}", url).group()[2:];
    return (int(x), int(y), int(z));

def fetch_thread(start, end, host):
    """Download the tiles for slice indices [start, end), logging failures.

    Failed URLs are appended to error.log so deal_error() can retry them.
    Fixes over the original:
    - 'start' only advanced inside the try, so one persistently failing tile
      retried the same page forever; it now always advances per page.
    - the except clause could write 'None + \\n' (TypeError) when get_slice
      itself failed before any URL was built; failures are now handled per
      tile, where the URL is always known.
    - every worker opened the shared error.log with mode 'w', truncating the
      other threads' entries; append mode keeps them all.
    """
    with open(os.path.join(CURR_DIR, "error.log"), mode='a') as f_error:
        while start < end:
            page_end = min(start + DEFAULT_PAGE_SIZE, end)
            try:
                rs = tileslicer.get_slice(start, page_end)
            except Exception as e:
                print('failed to get slice %s-%s: %s' % (start, page_end, e))
                rs = []
            for x, y, z in rs:
                url = get_tile_url(host, x, y, z)
                try:
                    save_path = get_save_path(x, y, z)
                    # Re-download from scratch: drop any stale/partial file.
                    if os.path.exists(save_path):
                        os.remove(save_path)
                    download_pic(url, save_path)
                except Exception as e:
                    print('failed to download: %s\nerror: %s' % (url, e))
                    f_error.write(url + '\n')
            # Always advance, even when some tiles in this page failed.
            start += DEFAULT_PAGE_SIZE


def reset_fetcher():
    """Discard any previously computed tile slices before a new fetch run."""
    tileslicer.clear_slices()


def start_fetch(tile_count):
    """Distribute `tile_count` tiles across workers, one range per host mirror.

    On macOS each range runs in a thread (multiprocessing crashed there, per
    the original note); elsewhere each range gets its own process, which
    fans out to threads via fetch_process.

    Fixes over the original: 'tile_count/proc_count' was float division in
    Python 3, leaking float boundaries into tile indices, and the loop ran
    proc_count+1 times, printing a spurious out-of-range line.
    """
    host_count = len(HOSTS)
    proc_count = host_count

    # Integer ceiling division so proc_count pages always cover tile_count.
    page_size = -(-tile_count // proc_count)
    if page_size <= 0:
        page_size = DEFAULT_PAGE_SIZE
    start = 0

    for i in range(proc_count):
        host = HOSTS[i % host_count]
        end = min(start + page_size, tile_count)
        if start < end:
            if platform.system() == 'Darwin':
                # mac os crash if multiprocessing is used, currently using threading instead
                t = threading.Thread(target=fetch_thread, args=(start, end, host))
                t.start()
            else:
                proc = multiprocessing.Process(target=fetch_process, args=(start, end, host))
                proc.start()
            print("start:%d------end:%d"%(start, end))
        start += page_size

def fetch_tiles_z(lng1, lat1, lng2, lat2, z, tiles_dir=TILES_DIR):
    """Fetch all tiles covering the bounding box at the single zoom level z.

    Args:
        lng1, lat1, lng2, lat2: bounding box corners (longitude/latitude).
        z: zoom level.
        tiles_dir: optional override for the output directory.

    The original assigned TILES_DIR without a `global` statement, creating an
    unused local so the override silently did nothing; fixed here.
    """
    global TILES_DIR
    if tiles_dir:
        TILES_DIR = tiles_dir
    print('reset fetcher...')
    reset_fetcher()
    print('start slicer...')
    tile_count = tileslicer.slice_tiles_z(lng1, lat1, lng2, lat2, z)
    print('tile count %d' % tile_count)
    print('start fetching...')
    start_fetch(tile_count)


def fetch_tiles(lng1, lat1, lng2, lat2, z1, z2, tiles_dir=TILES_DIR):
    """Fetch all tiles covering the bounding box for zoom levels z1 through z2.

    Args:
        lng1, lat1, lng2, lat2: bounding box corners (longitude/latitude).
        z1, z2: zoom level range.
        tiles_dir: optional override for the output directory.

    The original assigned TILES_DIR without a `global` statement, creating an
    unused local so the override silently did nothing; fixed here.
    """
    global TILES_DIR
    if tiles_dir:
        TILES_DIR = tiles_dir
    print('reset fetcher...')
    reset_fetcher()
    print('start slicer...')
    tile_count = tileslicer.slice_tiles(lng1, lat1, lng2, lat2, z1, z2)
    print('tile count %d' % tile_count)
    print('start fetching...')
    start_fetch(tile_count)


if __name__ == '__main__':
    # Make sure the tile output directory exists before any worker writes to it.
    if not os.path.exists(TILES_DIR):
        os.mkdir(TILES_DIR)
    '''
    ShanDong Province
    {"lng":114.467361,"lat":34.127447}, {"lng":122.985309,"lat":38.125886}
    '''
    #fetch_tiles_z(114.457361, 34.127447, 122.985309, 38.125886, 12)
    #fetch_tiles(72, 4, 136, 54, 1, 12)
    # Current entry point only retries previously failed downloads from a log.
    # NOTE(review): hard-coded, mixed-separator Windows path — confirm it
    # matches the actual error.log location before running on another machine.
    deal_error("D:\\test/error.log");
