# -*- coding: utf-8 -*-
# @Author: arp
# @Date:   2017-04-25 00:48:35
# @Last Modified by:   arp
# @Last Modified time: 2017-04-26 19:15:02
import os
import json
from bs4 import BeautifulSoup as BS
from bs4.element import Tag
import requests
import logging
# Root-logger configuration applied once at import time so every module
# sharing this process gets the same timestamped, line-numbered format.
fmt = ('%(asctime)s %(filename)s[line:%(lineno)d] '
       '%(levelname)s %(message)s')
logging.basicConfig(format=fmt, level=logging.INFO)


class BSPage(object):
    """Helpers for scraping HTML tables and mirroring directory-listing pages.

    Wraps BeautifulSoup for table extraction and adds best-effort file
    download.  ``save_file`` reports its outcome with a status dict:
        ok=0  download/network error
        ok=1  file saved
        ok=2  skipped, file already exists (overwrite=False)
        ok=3  existing file overwritten (overwrite=True)
    """

    def __init__(self, html, _type='html5lib'):
        """Wrap *html* in a soup object.

        :param html: raw HTML string, or an already-parsed BS/Tag object
            (passed through unchanged).
        :param _type: parser backend handed to BeautifulSoup.
        """
        if isinstance(html, (BS, Tag)):
            self.html = html
        else:
            self.html = BS(html, _type)

    def get_tag(self, tag, **kw):
        """Return the first *tag* element matching **kw filters, or None."""
        if not self.html:
            return None
        return self.html.find(tag, **kw)

    def get_table(self, **kw):
        """Return the first ``<table>`` matching **kw filters, or None."""
        return self.get_tag('table', **kw)

    @staticmethod
    def get_html(url):
        """Fetch *url* and return the response body, or None on any failure.

        The timeout keeps an unresponsive server from blocking the caller
        forever; all network errors are collapsed into None (best effort).
        """
        try:
            return requests.get(url, timeout=30).text
        except Exception:
            return None

    @staticmethod
    def parse_table(bsobj_table, headers=None, getlink=False, fmt=None, **kw):
        """Parse an HTML table into structured rows.

        :param bsobj_table: table markup string, or a BS/Tag table element.
        :param headers: optional list of column names; when falsy the
            table's ``<th>`` texts are used instead.
        :param getlink: when True each cell becomes ``(text, href-or-None)``.
            Forced to False for fmt='csv', which needs plain strings.
        :param fmt: None -> list of dicts, 'json' -> JSON string,
            'csv' -> comma-joined text (header line + one line per row).
        :param kw: extra ``find()`` filters used when locating the table.
        :return: rows in the requested format, or None if no table given.
        """
        if not bsobj_table:
            return None
        # String comparison must use ==, never 'is': identity of string
        # literals is an interning detail, not a language guarantee.
        if fmt == 'csv':
            getlink = False
        if isinstance(bsobj_table, (Tag, BS)):
            table = bsobj_table
        else:
            table = BS(bsobj_table, 'html5lib').find('table', **kw)
        if not headers:
            heads = [h.get_text().strip() for h in table.find_all('th') if h]
        else:
            heads = headers
        logging.debug('[+] Get head: %s' % heads)
        datas = []
        for tr in table.tbody.find_all('tr'):
            data = BSPage._parse_table_row(tr, getlink)
            if data:
                logging.debug('[+] Get data: %s' % data)
                if fmt != 'csv':
                    data = dict(zip(heads, data))
                datas.append(data)
        if fmt == 'json':
            return json.dumps(datas, ensure_ascii=False)
        if fmt == 'csv':
            ret = ','.join(heads) + '\n'
            ret += '\n'.join([','.join(d) for d in datas if d])
            return ret
        return datas

    @staticmethod
    def _parse_table_row(bsobj_table_tr, getlink=False):
        """Extract one table row's ``<td>`` cells.

        :param bsobj_table_tr: ``<tr>`` markup string or BS/Tag element.
        :param getlink: when True return ``(text, href-or-None)`` per cell,
            otherwise just the stripped cell text.
        :return: list of cell values, or None for a falsy input.
        """
        if not bsobj_table_tr:
            return None
        if isinstance(bsobj_table_tr, (BS, Tag)):
            tr = bsobj_table_tr
        else:
            tr = BS(bsobj_table_tr, 'html5lib').find('tr')
        tds = tr.find_all('td')
        if not getlink:
            return [d.get_text().strip() for d in tds if d]
        return [(d.get_text().strip(), d.a.get('href') if d.a else None)
                for d in tds if d]

    @staticmethod
    def parse_filetree(filetable, headers=None, **kw):
        """Split a directory-listing table into (directories, files).

        Rows whose 'Mode' text starts with 'd' are directories, '-' are
        regular files (ls -l convention shown by common autoindex pages).

        :param filetable: listing table markup or BS/Tag element.
        :param headers: column names; defaults to ['Mode', 'Name', 'Size'].
        :return: ``(dirs, files)`` where each entry is the row's 'Name'
            cell as a ``(text, link)`` tuple (parse_table getlink=True).
        """
        if headers is None:
            # None sentinel instead of a mutable default argument.
            headers = ['Mode', 'Name', 'Size']
        flist = BSPage.parse_table(filetable, headers, True, **kw)
        if not flist:
            # No table / empty listing: return empty results, not a crash.
            return [], []
        dlist = [d['Name'] for d in flist if d['Mode'][0].startswith('d')]
        files = [f['Name'] for f in flist if f['Mode'][0].startswith('-')]
        return dlist, files

    @staticmethod
    def download_all(url, fpath, headers=None,
                     buffer_size=1024 * 1024,
                     overwrite=False,
                     **kw):
        """Mirror the file listing at *url* into ``fpath/<domain>/<last>``.

        Downloads every listed file, then walks sub-directories
        breadth-first via :meth:`download_folder`.

        :param url: listing URL ('http://' is prepended when missing).
        :param fpath: local root directory for the mirror.
        :param headers: listing column names (see parse_filetree).
        :param buffer_size: chunk size handed to save_file.
        :param overwrite: replace existing local files when True.
        :param kw: extra filters forwarded to the table parser.
        """
        if not url.startswith('http'):
            url = 'http://' + url
        parts = url.split('/')
        domain = parts[2]
        last_path = parts[-2] if url.endswith('/') else parts[-1]
        fpath = os.path.sep.join([fpath, domain, last_path])
        if not os.path.exists(fpath):
            os.makedirs(fpath)

        html = BSPage.get_html(url)
        dlist, files = BSPage.parse_filetree(html, headers, **kw)
        for name, _link in files:
            # rstrip avoids 'http://host/dir//file' when url ends with '/'.
            furl = url.rstrip('/') + '/' + name
            fname = os.path.sep.join([fpath, name])
            BSPage.save_file(furl, fname, buffer_size, overwrite)
        # Explicit work queue instead of appending to the list being
        # iterated: same breadth-first order, no mutation-while-iterating.
        pending = list(dlist)
        while pending:
            name, _link = pending.pop(0)
            dname = os.path.sep.join([fpath, name])
            if not os.path.exists(dname):
                os.makedirs(dname)
            durl = url.rstrip('/') + '/' + name + '/'
            logging.debug('[*] Download folder: %s' % name)
            subdirs = BSPage.download_folder(durl, dname, headers,
                                             buffer_size, overwrite, **kw)
            # Requeue children with their path made relative to fpath.
            pending.extend((name + '/' + d[0], d[1]) for d in subdirs)

    @staticmethod
    def download_folder(durl, dpath, headers=None,
                        buffer_size=16 * 1024,
                        overwrite=False,
                        **kw):
        """Download every file listed at *durl* into local *dpath*.

        buffer_size/overwrite are now threaded through to save_file (the
        old code silently dropped the caller's values for subfolders).

        :return: list of ``(name, link)`` sub-directory entries for the
            caller to recurse into.
        """
        html = BSPage.get_html(durl)
        dirs, files = BSPage.parse_filetree(html, headers, **kw)
        for name, _link in files:
            furl = durl.rstrip('/') + '/' + name
            fname = os.path.sep.join([dpath, name])
            BSPage.save_file(furl, fname, buffer_size, overwrite)
        return dirs

    @staticmethod
    def save_file(url, fpath, buffer_size=16 * 1024, overwrite=False):
        """Stream *url* to local file *fpath*.

        Fixes the original broken if/if-else chain: with overwrite=False an
        existing file was reported as skipped but then downloaded anyway,
        while overwrite=True on an existing file never downloaded at all.

        :return: status dict (see class docstring for the ok codes).
        """
        if not url.startswith('http'):
            url = 'http://' + url
        logging.debug('[*] Get file: %s' % url)
        existed = os.path.exists(fpath)
        if existed and not overwrite:
            # Keep the local copy untouched.
            status = dict(ok=2, msg='file exists', data=fpath)
        else:
            try:
                r = requests.get(url)
                with open(fpath, 'wb') as f:
                    for chunk in r.iter_content(buffer_size):
                        f.write(chunk)
                if existed:
                    status = dict(ok=3, msg='file overwrite', data=fpath)
                else:
                    status = dict(ok=1, msg='save file ok', data=fpath)
            except Exception:
                status = dict(ok=0, msg='get file error', data=fpath)
        logging.info('[*] Save file status: %s' % status)
        return status


class FilePage(object):
    """Thin wrapper that holds a parsed page as a :class:`BSPage`."""

    def __init__(self, html):
        """Parse *html* (string or soup) and keep it as ``self.page``."""
        # All parsing work is delegated to BSPage.
        self.page = BSPage(html)
