#!/usr/bin/env python3
# -*- coding:utf-8 -*-
## author : cypro666
## date   : 2014.6.1
"""
A group of utility functions about URL, regex, ip and others
"""
import os, sys
import json, re
import sched, time
from threading import Thread
from urllib.parse import urlparse, unquote_plus
try:
    from tornado.netutil import Resolver
    from requests import get, post
except ImportError as e:
    sys.stdout.write('%s\n' % str(e))

from .fileio import FileIO
from .debug import print_exception, time_meter

# Shorthand writers for stdout / stderr
sysout = sys.stdout.write
syserr = sys.stderr.write


# One dotted-quad octet: 0-255, no forms like '01' or '256'
_IPV4_OCTET = r"([0-9]|[1-9][0-9]|1\d\d|2[0-4]\d|25[0-5])"

# Full IPv4 address: exactly four octets joined by literal dots.
# Raw strings avoid the invalid-escape warnings ('\d', '\.') that the
# old plain-string, backslash-continued pattern produced on modern Python.
REGEX_IPV4 = re.compile(r"^" + r"\.".join([_IPV4_OCTET] * 4) + r"$")

# http/https/ftp URL matcher.
# NOTE: the original pattern contained the HTML entity '&amp;' inside its
# character classes; besides '&' that only added 'a', 'm', 'p' (already in
# \w) and ';', so '&;' below keeps the matched language identical while
# removing the copy-paste artifact.
REGEX_URL = re.compile(
    r"""((http|ftp|https)://[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&;:/~\+#]*[\w\-\@?^=%&;/~\+#])?)""",
    re.S)

# Fast alias: IS_IPV4_STR(s) -> re.Match or None
IS_IPV4_STR = REGEX_IPV4.match


class ExtractURL(list):
    """List of all URL strings found in *text* by REGEX_URL.

    The instance itself is a list of the matched URL substrings; use
    unique() to iterate them with duplicates removed.
    """
    __slots__ = ('_unique',)

    def __init__(self, text):
        # Store the matched strings (not re.Match objects, which the
        # original kept): Match objects hash by identity, which made
        # the unique() set useless for deduplication.
        self.extend(m.group(0) for m in REGEX_URL.finditer(text))
        # _unique was never initialized before, so unique() raised
        # AttributeError on first call (this is a __slots__ class).
        self._unique = None

    def unique(self):
        """Return an iterator over the distinct URLs (order unspecified)."""
        if not self._unique:
            self._unique = set(self)
        return iter(self._unique)


# Types that pass through obj2bytes unchanged
_BYTES_TYPES = (bytes, bytearray)

def obj2bytes(obj):
    """Coerce *obj* to bytes.

    A str is encoded as ASCII, falling back to UTF-8; bytes/bytearray
    pass through untouched; anything else must support the buffer
    protocol (e.g. memoryview, array).

    Raises ValueError for text that is neither ASCII nor UTF-8
    encodable, TypeError for non-bytes-like objects.
    """
    if isinstance(obj, str):
        for codec in ('ascii', 'utf-8'):
            try:
                return bytes(obj, codec)
            except UnicodeEncodeError:
                pass
        raise ValueError('Need ASCII or UTF-8 characters')
    if isinstance(obj, _BYTES_TYPES):
        return obj
    try:
        # buffer-protocol objects: snapshot their contents as bytes
        return memoryview(obj).tobytes()
    except Exception as err:
        raise TypeError("argument should be a bytes-like object or ASCII "
                        "string, not %r" % obj.__class__.__name__) from err

def utf8(s, errors = 'replace'):
    """Decode a buffer-like object *s* into a UTF-8 text string.

    Undecodable bytes are substituted rather than raising, unless a
    different *errors* policy (e.g. 'strict') is given.
    """
    return str(s, encoding='utf-8', errors=errors)


def addhttp(line):
    """Return *line* guaranteed to carry an 'http://' prefix."""
    if line[:7] == 'http://':
        return line
    return 'http://' + line

def subhttp(line):
    """Return *line* with a leading 'http://' prefix removed, if present."""
    if line[:7] == 'http://':
        return line[7:]
    return line


def addhttpb(line):
    """Bytes version of addhttp: ensure a b'http://' prefix."""
    head = line[:7]
    if head == b'http://':
        return line
    return b'http://' + line

def subhttpb(line):
    """Bytes version of subhttp: drop a leading b'http://' prefix."""
    head = line[:7]
    if head == b'http://':
        return line[7:]
    return line


def strip_endl(line):
    """Strip trailing line-end characters (LF and CR, so Windows files work too)."""
    return line.rstrip('\n\r')

def strip_endlb(line):
    """Bytes version of strip_endl: strip trailing LF and CR (Windows files included)."""
    return line.rstrip(b'\n\r')


def strip_url_param(url):
    """Drop the query part of a URL.

    input:  www.abc.com/xyz?name=123
    output: www.abc.com/xyz
    using this in try-except block and check your url
    """
    head, _sep, _query = url.partition('?')
    return head


def strict_unquote(s):
    """Unquote a percent-encoded URL string with strict error handling.

    Tries utf-8, then gbk, then latin-1; returns the first successful
    strict decode.  Returns None only if every codec fails (practically
    unreachable, since latin-1 accepts any byte value).
    """
    for codec in ('utf-8', 'gbk', 'latin-1'):
        try:
            return unquote_plus(s, codec, 'strict')
        except UnicodeError:
            # strict decoding failed for this codec -- try the next.
            # (was a bare `except:`, which also swallowed
            # KeyboardInterrupt / SystemExit)
            continue
    return None


class URLString(str):
    """A str subclass for URLs: parsing, fetching and DNS resolving.

    Attribute lookups that miss on the string itself are delegated to
    the urlparse() result (e.g. .scheme, .netloc, .path).
    """
    __slots__ = ('parsed', 'solver')

    @classmethod
    def config_dns_resolver(cls):
        """class method, if used, manage your own tornado ioloop!"""
        Resolver.configure('tornado.netutil.BlockingResolver')

    def __new__(cls, s):
        return str.__new__(cls, s)

    def __init__(self, s):
        super().__init__()
        self.parsed = urlparse(self)   # ParseResult: scheme/netloc/path/...
        self.solver = Resolver()       # per-instance tornado resolver

    def resolveip(self):
        """Resolve the host part to addresses (only IP v4); blocks on the future."""
        return self.solver.resolve(self.parsed.netloc, port=80).result()

    def get(self, **kargs):
        """Send a GET request to the url; return response text, or None on failure."""
        try:
            # explicit check instead of `assert` -- asserts vanish under -O
            if not self.parsed.scheme:
                raise ValueError('URL has no scheme: %s' % self)
            return get(self, **kargs).text
        except Exception:
            if __debug__:
                print_exception('URLString.get')
            return None

    def post(self, data = None, **kargs):
        """Send a POST request to the url; return the Response, or None on failure.

        *data* defaults to None and is sent as an empty form -- the old
        mutable `data = {}` default was shared across all calls.
        """
        try:
            if not self.parsed.scheme:
                raise ValueError('URL has no scheme: %s' % self)
            return post(self, data = data if data is not None else {}, **kargs)
        except Exception:
            if __debug__:
                print_exception('URLString.post')
            return None

    def __getattr__(self, attr):
        # unknown attributes fall through to the parse result
        return getattr(self.parsed, attr)


def loadjson(filename, objhook = None):
    """Load json from file and return the decoded object.

    Note: the `encoding` keyword of json.loads was deprecated in 3.1 and
    removed in Python 3.9 (it raised TypeError here); bytes input is now
    decoded explicitly as UTF-8 before parsing instead.
    """
    with FileIO(filename) as f:
        data = f.readall()
        if isinstance(data, (bytes, bytearray)):
            data = data.decode('utf-8')
        return json.loads(data, object_hook = objhook)


def dumpjson(objs, objhook = None, filename = None):
    """Serialize *objs* to a pretty-printed JSON string.

    If *objhook* is given, it is applied to every element of *objs*
    first.  If *filename* is given, the string is also written there.
    Returns the JSON text.
    """
    payload = list(map(objhook, objs)) if objhook else objs
    text = json.dumps(payload, indent = 4, ensure_ascii = False)
    if filename:
        with FileIO(filename, 'w') as out:
            out.write(text)
    return text


class IncreamentID(int):
    """Auto-incrementing integer generator, for making 'id' values.

    Iterating yields int(self)+1, int(self)+2, ... forever.  The old
    `__init__` assigned to the local name `self` -- a no-op on an
    immutable int subclass -- and its zero-argument signature made
    IncreamentID(n) raise TypeError; it was dropped, so IncreamentID()
    still starts from 0 and IncreamentID(n) now works as a start value.
    """
    def __iter__(self):
        # Count from the instance's own value; plain ints are yielded,
        # exactly as the original `self += 1` rebinding produced.
        current = int(self)
        while True:
            current += 1
            yield current


class PathWalker(Thread):
    """Daemon thread that walks a directory tree and calls a callback
    with the full path of every file it finds, on a repeating schedule."""

    def __init__(self, path, callback, delay, times):
        Thread.__init__(self)
        assert path and callback and delay >= 0 and times > 0
        sys.stdout.write(path+'\n')
        if not os.path.exists(path):
            raise ValueError('PathWalker : no such dir : ' + path)
        self.__path = path
        self.__delay = delay        # seconds between scheduled walks
        self.__times = times        # how many walks to schedule
        self.__callback = callback  # called as callback(full_file_path)
        self.daemon = True # True is better

    def walk(self):
        """One full traversal: invoke the callback for every file under the root."""
        for top, _dirs, names in os.walk(self.__path):
            try:
                prefix = top if top.endswith(os.sep) else top + os.sep
                for name in names:
                    self.__callback(prefix + name)
            except Exception:
                # best-effort: a failing callback aborts only this directory
                print_exception('PathWalker.walk')

    def run(self):
        """Schedule `times` walks, `delay` seconds apart, and run them."""
        timetable = sched.scheduler(time.time, time.sleep)
        for n in range(1, self.__times + 1):
            timetable.enter(self.__delay * n, 1, self.walk)
        timetable.run()



def is_valid_dir(*args) ->bool:
    """Check that every argument names an existing, non-relative directory.

    Returns False if any path is not an existing directory or starts
    with '.' or '~' (relative / home-anchored spellings).  Raises
    TypeError for non-str arguments -- this was an `assert`, which is
    stripped under `python -O` and so validated nothing there.
    """
    for d in args:
        if not isinstance(d, str):
            raise TypeError('directory name must be str, got %r'
                            % type(d).__name__)
        if not os.path.isdir(d):
            return False
        # safe from IndexError: '' never passes isdir above
        if d.startswith(('.', '~')):
            return False
    return True


def get_file_names(path, check):
    """Collect every filename under *path* for which check(name) is truthy."""
    matched = []

    def _collect(name):
        # invoked by the walker thread once per file found
        if check(name):
            sys.stdout.write(name + ' added to param list\n')
            matched.append(name)

    walker = PathWalker(path, _collect, 0, 1)
    walker.start()
    walker.join(60)   # give the background walk up to a minute to finish
    return matched



@time_meter(__name__)
def test(url, jsonfile):
    """Smoke-test driver: exercises URLString, the IPv4 matcher, JSON
    load/dump, ExtractURL and PathWalker in sequence.

    NOTE(review): the *url* parameter is unused -- the demo always hits
    http://www.baidu.com and therefore needs network access.
    """
    s = URLString('http://www.baidu.com')
    print('URL : %s\n' % s)
    
    # switch tornado to the blocking resolver, then resolve and fetch
    URLString.config_dns_resolver()
    print(s.resolveip())
    # NOTE(review): s.get() returns None on failure, which would make
    # the [:100] slice raise TypeError here
    print('Response :\n', s.get()[:100])
    
    # sanity checks for the IPv4 regex alias
    assert IS_IPV4_STR('123.123.123.123')
    assert not IS_IPV4_STR('234.345.456.678')
    assert not IS_IPV4_STR('1345.456.678')
    
    try:
        # round-trip: load jsonfile, dump it back next to the source file
        objs = loadjson(jsonfile)
        print(objs[0])
        s = dumpjson(objs, filename = os.path.dirname(jsonfile)+'/test_utilities.json')
        print(len(s))
    except Exception as e:
        # best-effort: a missing or invalid json file is silently skipped
        pass

    # s is now the dumped JSON text (or still the URL if loading failed)
    print(ExtractURL(s))
    
    # walk the cwd once, 3 seconds from now, printing each file's ctime;
    # join(4) leaves 1s of slack for the walk to complete
    cb = lambda f : print(f, time.ctime(os.path.getctime(f)))
    pw = PathWalker(os.getcwd(), cb, 3.0, 1)
    pw.start()
    pw.join(4)
    print('finish')











