# coding: utf-8
from __future__ import print_function
import requests
from .errors import XapiQuotaException, XapiRunOutOfMemory
from bs4 import BeautifulSoup
from .geo_classes import Node, Way
import codecs
import os
import logging
import shutil
import re


class Xapi(object):
    """Client for the Overpass XAPI compatibility endpoint.

    Fetches OSM ways matching a tag (optionally restricted to a bounding
    box), either directly into memory or streamed to a file, and parses
    the returned XML into Way objects built from Node objects.
    """

    base_url = "http://www.overpass-api.de/api/xapi?"

    def __init__(self, timeout=6000):
        # Request timeout in seconds; Overpass queries can run very long.
        self.timeout = timeout

    @staticmethod
    def _build_query_url(element, tag_keyword, tag_value, region=None):
        """Build an XAPI query URL for `element` filtered by tag and bbox.

        `region` (if given) must expose `left_up` / `right_down` corner
        points with `lat` / `lng` attributes. XAPI expects the bbox as
        left,bottom,right,top.
        """
        if region is not None:
            bbox = '[bbox=%s,%s,%s,%s]' % (str(region.left_up.lng), str(region.right_down.lat),
                                           str(region.right_down.lng), str(region.left_up.lat))
        else:
            bbox = ''
        tag = '[%s=%s]' % (tag_keyword, tag_value)
        return Xapi.base_url + element + tag + bbox

    def query(self, query_url):
        """GET `query_url` and return the response body as text.

        Raises:
            XapiQuotaException: the server reports the IP quota was hit.
            XapiRunOutOfMemory: the server ran out of memory for the query.
        """
        print(query_url)
        r = requests.get(query_url, timeout=self.timeout)
        xml = r.text
        # The server signals errors inside an otherwise-200 body, so
        # detect them by their known message text.
        if xml.find('Please check /api/status for the quota of your IP address') != -1:
            raise XapiQuotaException
        if xml.find('Query run out of memory using about 2048 MB of RAM.') != -1:
            raise XapiRunOutOfMemory
        return xml

    def query_ways(self, tag_keyword, tag_value, region=None):
        """Fetch ways tagged `tag_keyword=tag_value` (optionally within
        `region`) and return them parsed as a list of Way objects."""
        query_url = self._build_query_url('way', tag_keyword, tag_value, region)
        return self.__process_xml_for_ways(self.query(query_url))

    def download_ways(self, filename, tag_keyword, tag_value, region=None):
        """Stream the way query result into `filename`.

        On any download error the partial file is removed and the original
        exception re-raised. A suspiciously small result file is inspected
        for the server's quota / out-of-memory error messages, in which
        case it is deleted and the matching exception raised.
        """
        query_url = self._build_query_url('way', tag_keyword, tag_value, region)
        print(query_url)
        r = requests.get(query_url, timeout=self.timeout, stream=True)
        try:
            with open(filename, 'wb') as f:
                chunk_count = 0
                for chunk in r.iter_content(chunk_size=4096):
                    if chunk:  # filter out keep-alive new chunks
                        chunk_count += 1
                        f.write(chunk)
                        f.flush()
                        if chunk_count % 100 == 0:
                            logging.info('Saved %d chunks to %s' % (chunk_count, filename))
        except Exception:
            # Clean up the partial download (if any was written), then
            # re-raise; a bare `raise` keeps the original traceback.
            if os.path.exists(filename):
                os.remove(filename)
            raise
        # Error responses are short; only small files can be error pages.
        if os.path.getsize(filename) < 10 * 1024:
            with codecs.open(filename) as f:
                content = f.read()
            if content.find('Please check /api/status for the quota of your IP address') != -1:
                os.remove(filename)
                raise XapiQuotaException
            if content.find('Query run out of memory using about 2048 MB of RAM.') != -1:
                os.remove(filename)
                raise XapiRunOutOfMemory

    def process_one_file_for_ways(self, filename):
        """Parse a previously downloaded XML file into a list of Way objects."""
        with codecs.open(filename, encoding='utf-8') as f:
            return self.__process_xml_for_ways(f.read())

    def __process_xml_for_ways(self, xml):
        """Extract ways from raw OSM XML by regex / string scanning.

        Originally implemented with BeautifulSoup, but that ran out of
        memory on large responses, so this dirtier scanning approach is
        used instead. Ways referencing nodes missing from the extract
        are skipped with a warning.
        """
        index = {}
        nodes = re.findall('<node id="(.*?)" lat="(.*?)" lon="(.*?)"', xml)
        logging.info('Found %d nodes.' % len(nodes))
        for node in nodes:
            index[int(node[0])] = Node(lat=float(node[1]), lng=float(node[2]))
        del nodes  # free the match list before scanning ways
        logging.info('Index has been created.')
        way_start = xml.find('<way id="')
        output_ways = []
        while way_start != -1:
            way_end = xml.find("</way>", way_start)
            way = xml[way_start:way_end + 6]
            try:
                poly_nodes = re.findall('<nd ref="(.*?)"', way)
                output_ways.append(Way([index[int(node)] for node in poly_nodes]))
            except Exception:
                # Typically a KeyError: the way references a node that is
                # not present in this extract.
                logging.warning('Error with:' + way[0:way.find('>') + 1])
            way_start = xml.find('<way id="', way_end)
        return output_ways


if __name__ == '__main__':
    # Ad-hoc driver: download farmyard land-use ways for the USA region.
    import const

    client = Xapi()
    # Other examples:
    #   client.query_ways('landuse', 'farmland', const.NEW_YORK_REGION)
    #   ways = client.process_one_file_for_ways('data/ny_farmland.txt')
    client.download_ways('data/download.txt', 'landuse', 'farmyard', const.USA_REGION)
