#!/usr/bin/python2
# -*- coding: utf-8 -*-
# @Author: ystlong
# @Date:   2019-07-07 01:23:40
# @Last Modified by:   ystlong
# @Last Modified time: 2019-07-07 03:57:04

import sys
import os
import sqlite3
import time
import re
import datetime

try:
    # Make calibre's bundled python packages importable when this script is
    # run outside calibre's own launcher; all paths overridable via env vars.
    path = os.environ.get('CALIBRE_PYTHON_PATH', '/usr/lib64/calibre')
    if path not in sys.path:
        sys.path.insert(0, path)

    # NOTE(review): these sys attributes are presumably read by calibre at
    # import time to locate its resources/plugins -- confirm against the
    # installed calibre version.
    sys.resources_location = os.environ.get('CALIBRE_RESOURCES_PATH', '/usr/share/calibre')
    sys.extensions_location = os.environ.get('CALIBRE_EXTENSIONS_PATH', '/usr/lib64/calibre/calibre/plugins')
    sys.executables_location = os.environ.get('CALIBRE_EXECUTABLES_PATH', '/usr/bin')

    from calibre.ebooks.metadata.meta import get_metadata
except:
    raise
try:
    # python2/3 compatibility shim: ``unicode`` does not exist on python3.
    unicode("aa")
except NameError:
    unicode = str
# Placeholder title/author values that should be treated as "unknown".
unkinfo = ["未知", "Unknown", "!00001", "000001", ""]
# Author names that indicate the metadata is junk (tool defaults).
exc_au_names = ["Administrator"]
# Ebook extensions calibre's get_metadata is asked to parse.
support_stream_type = ["azw3", "mobi", "epub", "azw"]

def get_ebook_info(path):
    """Extract (title, authors) ebook metadata from the file at *path*.

    :param path: file path; the extension decides whether it is parsed.
    :return: ``(title, authors_list)``; ``("", [])`` when the extension is
        unsupported, the metadata cannot be read, or it only contains
        placeholder values.
    """
    stream_type = os.path.splitext(path)[1].replace('.', '').lower()
    title, authors = "", []
    if stream_type in support_stream_type:
        try:
            with open(path, 'rb') as stream:
                mi = get_metadata(stream, stream_type, force_read_metadata=True)
            # mi.authors may be None for some files; normalize to a list
            title, authors = mi.title, (mi.authors or [])
            if title in unkinfo:
                title = ""
            if len(authors) == 1 and authors[0] in exc_au_names:
                # a lone placeholder author invalidates the whole record
                title, authors = "", []
            else:
                authors = [a for a in authors if a not in unkinfo]
        except Exception:
            # metadata extraction is best-effort: broken/unreadable files
            # simply yield empty metadata instead of aborting the scan
            title, authors = "", []
    return title, authors

def safe_walk(top, topdown=True, onerror=None, followlinks=False, real_dirs=None):
    """Like ``os.walk`` but safe to use with ``followlinks=True``: every
    directory's real path is remembered so symlink cycles are not revisited.

    :param top: directory to walk.
    :param topdown: yield a directory before (True) or after (False) its
        subdirectories, mirroring ``os.walk``.
    :param onerror: optional callable invoked with the ``OSError`` raised by
        ``os.listdir`` (e.g. permission denied); the directory is skipped.
    :param followlinks: descend into directory symlinks.
    :param real_dirs: internal accumulator of visited real paths used by the
        recursive calls; leave as None at the top-level call.
    :yields: ``(dirpath, dirnames, filenames)`` tuples.
    """
    # Bugfix: the original used a mutable default ``real_dirs={}`` which is
    # shared across *all* top-level calls, so a second walk of the same tree
    # silently skipped every directory seen by the first one.
    if real_dirs is None:
        real_dirs = {}
    islink, join, isdir, realpath = os.path.islink, os.path.join, os.path.isdir, os.path.realpath

    # We may not have read permission for top; like os.walk, suppress the
    # error (or hand it to onerror) rather than aborting a long traversal.
    try:
        names = os.listdir(top)
    except OSError as err:
        if onerror is not None:
            onerror(err)
        return

    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        new_path = join(top, name)
        if followlinks or not islink(new_path):
            # resolve symlinks so each physical directory is seen only once
            real_path = realpath(new_path) if islink(new_path) else new_path
            if real_path in real_dirs:
                continue
            real_dirs[real_path] = True
            for x in safe_walk(new_path, topdown, onerror, followlinks, real_dirs):
                yield x
    if not topdown:
        yield top, dirs, nondirs

def list_files(root_dir, file_handle):
    """Visit every file under *root_dir* (symlinks followed, bottom-up) and
    call ``file_handle(full_path, file_name)`` for each one."""
    for parent, _dirs, file_names in safe_walk(root_dir, topdown=False, followlinks=True):
        for fname in file_names:
            file_handle(os.path.join(parent, fname), fname)

# def test_file_handle(file_path, file_name):
# 	print("file: %s"%(file_path))
# list_files(".", test_file_handle)

class DirIndex(object):
    """SQLite-backed index of files on disk, with cached ebook metadata.

    One row per file in table ``files``:
    file_path (primary key, relative path), file_name, file_ext,
    ebook_title, ebook_authors (comma separated), size (bytes).
    """

    def __init__(self, database_file, prefix_uri='', debug=False):
        """Open (or create) the sqlite index database.

        :param database_file: sqlite file path; ":memory:" also works.
        :param prefix_uri: unused; kept for backward compatibility.
        :param debug: enables debug_print (currently a no-op hook).
        """
        super(DirIndex, self).__init__()
        self.debug = debug
        self.db_file = database_file
        self.conn = sqlite3.connect(self.db_file)
        self._init_database()
        self.start_insert = False   # set on the first insert; starts the timer
        self.book_info_time = 0     # accumulated microseconds extracting metadata
        self.sql_insert_time = 0    # accumulated microseconds in sqlite work
        self.force_update = False   # re-read ebook metadata even when cached

    def debug_print(self, *msg):
        # NOTE: intentionally silent -- ``print(*msg)`` is python3-only syntax
        # and was disabled to keep python2 compatibility.
        if self.debug:
            pass

    def _database_execute(self, sql, args=()):
        """Execute *sql* with *args*, commit immediately, return the cursor."""
        self.debug_print(sql, args)
        res = self.conn.execute(sql, args)
        self.conn.commit()
        return res

    def _init_database(self):
        """Create the ``files`` table on first use."""
        self._database_execute("""
            create table if not exists files(
            file_path text PRIMARY KEY not null,
            file_name text not null,
            file_ext text not null,
            ebook_title text,
            ebook_authors text,
            size int)
        """)

    def query_title_authors(self, file_path):
        """Return the cached (title, authors) row for *file_path*, or ("", "")."""
        sql = "select ebook_title, ebook_authors from files where file_path=?"
        res = self.conn.execute(sql, [unicode(file_path)])
        for r in res:
            return r[0], r[1]
        return "", ""

    # A "title" consisting only of digits/dots/underscores/decoration glyphs
    # is considered meaningless; the plain file name is used instead.
    assume_not_tile_re = re.compile(u"^[\d\._卷┏━┓]*$")
    def _insert_one_item(self, file_path, file_name, file_ext, size, date_str='', insert_count=0, insert=True):
        """Insert or replace one file row; commits every 500 calls.

        Call once more with ``insert=False`` after the walk to flush the
        final partial batch. *date_str* is currently unused.
        """
        if not self.start_insert:
            self.start_time = datetime.datetime.now()
            self.start_insert = True
        if insert:
            sql = """insert or replace into files (file_path, file_name, file_ext, size, ebook_title, ebook_authors) values(?,?,?,?,?,?)"""
            t0 = datetime.datetime.now()
            title, authors = self.query_title_authors(file_path)
            # re-extract metadata when missing, placeholder, or forced
            if title in unkinfo or authors in exc_au_names or self.force_update:
                title, authors = get_ebook_info(file_path)
                authors = ",".join(authors)
            if self.assume_not_tile_re.match(unicode(title)):
                title = file_name
            args = [unicode(ss) for ss in (file_path, file_name, file_ext, size, title, authors)]
            self.book_info_time += (datetime.datetime.now() - t0).microseconds

            t0 = datetime.datetime.now()
            self.conn.execute(sql, args)
            self.sql_insert_time += (datetime.datetime.now() - t0).microseconds

        if insert_count % 500 == 0:
            t0 = datetime.datetime.now()
            self.conn.commit()
            self.sql_insert_time += (datetime.datetime.now() - t0).microseconds
            print(insert_count, datetime.datetime.now() - self.start_time, self.book_info_time/1000, self.sql_insert_time/1000)

    def generate_database(self, root_path, ignore_file):
        """Walk *root_path* and (re)index every file.

        :param ignore_file: optional path to a text file with one substring
            pattern per line; files whose relative path contains any pattern
            are skipped. Blank lines are ignored.
        """
        self.insert_count = 0
        ignores = []
        if ignore_file is not None and os.path.exists(ignore_file):
            with open(ignore_file) as ifs:
                for line in ifs:
                    line = line.strip()
                    if line == "":
                        continue
                    ignores.append(line)

        def _insert_file_handle(file_path, file_name):
            self.insert_count += 1
            # relative path under root, without leading "./" noise
            rel_file_path = re.sub(r"^\.*/", "", file_path.replace(root_path, ""), count=0)
            for ignore_path in ignores:
                if rel_file_path.find(ignore_path) >= 0:
                    # ignored directory/file (e.g. sync folders)
                    return
            file_ext = os.path.splitext(rel_file_path)[-1].strip(".")
            file_size = os.path.getsize(file_path)
            self.debug_print("insert file: ", file_path, rel_file_path)
            self._insert_one_item(rel_file_path, file_name, file_ext, file_size, insert_count=self.insert_count)

        list_files(root_path, _insert_file_handle)
        # flush/commit the final partial batch
        self._insert_one_item(None, None, None, 0, insert_count=0, insert=False)

    def clean(self, root_path):
        """Delete index rows whose file no longer exists under *root_path*."""
        sql = """select file_path from files"""
        delete_sql = "delete from files where file_path=?"
        res = self._database_execute(sql)
        # fetchall() first: deleting rows while iterating a live cursor on
        # the same connection can make sqlite skip rows
        for row in res.fetchall():
            if not os.path.exists(os.path.join(root_path, row[0])):
                print("clean: %s"%(row[0]))
                self._database_execute(delete_sql, (row[0], ))

    def search_file(self, query_str, full_word=False, page_size=0, page_no=0, sort=False, uniq=False):
        """Search file_path/ebook_title/ebook_authors; return (total_count, rows).

        *query_str* is split on whitespace; each word may carry a prefix:
            &word  &@word   -> AND condition
            |word  |@word   -> OR condition
            !word  !@word   -> NOT condition
        No prefix behaves like ``&``.  A ``@`` right after the prefix forces
        whole-word matching for that single word; otherwise any characters
        may appear between the letters (fuzzy LIKE).  Paging applies when
        both *page_size* and *page_no* are > 0; *uniq* groups rows by
        (ebook_title, size); *sort* orders by title then authors.
        """
        def wrap_query_word(query_word, full_word):
            # "@word" forces full-word matching for this word only
            if query_word.startswith("@"):
                query_word = query_word[1:]
                full_word = True
            if full_word:
                return "%%%s%%"%("%".join(query_word.split()))
            else:
                # fuzzy: allow anything between consecutive characters
                return "%%%s%%"%("%".join(list(query_word)))

        query_key_map = {
            "and": {
                "link": "and",
                "cond": "like",
                "cols": [],
                "params": []
            },
            "or": {
                "link": "or",
                "cond": "like",
                "cols": [],
                "params": []
            },
            "not": {
                "link": "and",
                "cond": "not like",
                "cols": [],
                "params": []
            }
        }

        def prepare_conds(query_key_map, key, query_word):
            # one "(col like ? or col like ? ...)" group per query word
            query_cols = ["file_path", "ebook_title", "ebook_authors"]
            cur_item = query_key_map[key]
            t = []
            for c in query_cols:
                t.append(" %s %s ? "%(c, cur_item["cond"]))
                cur_item["params"].append(query_word)
            cur_item["cols"].append("(%s)"%("or".join(t)))

        def get_conds(query_key_map):
            # combine the and/or/not groups into a single WHERE clause
            conds = []
            params = []
            for key in query_key_map:
                cur_item = query_key_map[key]
                if len(cur_item["cols"]) == 0:
                    continue
                query_type = "" if len(conds) == 0 else " and "
                cur_cond_str = cur_item["link"].join(cur_item["cols"])
                conds.append(" %s (%s) "%(query_type, cur_cond_str))
                params += cur_item["params"]
            res_cond_str = "" if len(conds) == 0 else " where %s "%("".join(conds))
            return res_cond_str, params

        for word in query_str.split():
            if word.startswith('&'):
                # bugfix: full_word was previously not forwarded here, which
                # raised TypeError for every "&word" query
                prepare_conds(query_key_map, "and", wrap_query_word(word[1:], full_word))
            elif word.startswith("|"):
                prepare_conds(query_key_map, "or", wrap_query_word(word[1:], full_word))
            elif word.startswith("!"):
                prepare_conds(query_key_map, "not", wrap_query_word(word[1:], full_word))
            else:
                prepare_conds(query_key_map, "and", wrap_query_word(word, full_word))

        sql_cond, query_params = get_conds(query_key_map)

        sql = """select file_path, file_name, file_ext, size, ebook_title, ebook_authors from files %s"""%(sql_cond)
        sql_group_by = " group by ebook_title,size "
        if uniq:
            sql += sql_group_by
        if sort:
            sql += " order by ebook_title, ebook_authors "
        count_sql = """select count(1) from files %s"""%(sql_cond)
        if uniq:
            # count distinct (title, size) groups instead of raw rows
            count_sql = """
            select count(1) from (
                select count(1) from files %s %s
            )
            """%(sql_cond, sql_group_by)
        total_count = self._database_execute(count_sql, query_params).fetchone()[0]
        if page_size > 0 and page_no > 0:
            sql += " limit ?,?"
            query_params.append(page_size*(page_no-1))
            query_params.append(page_size)
        res = self._database_execute(sql, query_params).fetchall()
        return (total_count, res)

class DirIndexHtml(object):
    """Renders DirIndex search results as a self-contained HTML page
    (search form, pager, result table)."""
    def __init__(self):
        super(DirIndexHtml, self).__init__()

        # Whole-page template; literal CSS/JS braces are escaped as {{ }}.
        self.html_tpl = """
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>search list</title>
    <style>
        span {{
            height: 20px;
            display: inline-block;
            font-size: 1.3em;
            margin: 0.5em;
        }}
    </style>
    <script>
        // alt:18 key code
        // ctrl:17
        window.onkeyup = function(e) {{
            // console.log("up:"+e.keyCode+":"+e.key);
            var page_no={page_no};
            var p_url = "&page_size={page_size}&name={query_str}{url_params}";

            if (e.keyCode == 39) {{
                // arrow right, next page
                window.location.href = "?page=" + (page_no+1) +  p_url;
            }} else if (e.keyCode == 37) {{
                // arrow left, pre page
                window.location.href = "?page=" + (page_no-1) +  p_url;
            }}
        }}

        function default_ebook() {{
            var i = document.getElementById("query_input");
            if (document.getElementById("default_query").checked == true)
                i.value = document.getElementById("default_query").value;
            else
                i.value = "";
        }}
    </script>
</head>
<body>
<form action="./#sss">
    <pre>
    query_str 格式: 中间使用空格分割, 当添加@字符时全字匹配使用左右键可翻页
        and_word  @and_word
        |or_word  |@or_word
        !no_word  !@no_word
    </pre>
    <br id="sss"/>
    <input id="query_input" type="text" name="name" value="{query_str}" style="display: inline-block; width: 80%;">
    <br />
    <input type="checkbox" name="word" value="" {word_checked} /> query by full word
    <input type="checkbox" name="sort" value="" {sort_checked} /> sort
    <input type="checkbox" name="uniq" value="" {uniq_checked} /> uniq
    <input id="default_query" type="checkbox" onchange="default_ebook()" value="|@.azw |@.mobi |@.epub" /> choose general ebook format
    <input type="hidden" name="page" value="1" />
    <input type="hidden" name="page_size" value="{page_size}" />
</form>
<hr />
<div>
    <!-- page str -->
    <span>total: {total_count}</span>
    <span><a href="?page=1&page_size={page_size}&name={query_str}{url_params}">first</a></span>
    <span><a href="?page={pre_page}&page_size={page_size}&name={query_str}{url_params}">&lt;&lt;</a></span>
    {pages}
    <span><a href="?page={next_page}&page_size={page_size}&name={query_str}{url_params}">&gt;&gt;</a></span>
    <span><a href="?page={last_page}&page_size={page_size}&name={query_str}{url_params}">last</a></span>
</div>
<table>
    <tr><th>pp</th><th>ext</th><th>file_name</th><th>size</th></tr>
    <!-- <tr>
        <td><a href="..">..</a></td>
        <td>{{ext}}</td>
        <td>{{name}}</td>
        <td>{{size}}</td>
    </tr> -->
    {rows}
</table>
<div>
    <!-- page str -->
    <span>total: {total_count}</span>
    <span><a href="?page=1&page_size={page_size}&name={query_str}{url_params}">first</a></span>
    <span><a href="?page={pre_page}&page_size={page_size}&name={query_str}{url_params}">&lt;&lt;</a></span>
    {pages}
    <span><a href="?page={next_page}&page_size={page_size}&name={query_str}{url_params}">&gt;&gt;</a></span>
    <span><a href="?page={last_page}&page_size={page_size}&name={query_str}{url_params}">last</a></span>
</div>
</body>
</html>
        """

        # One table row per search result.
        self.row_tpl = """
            <tr>
                <td><a href="../{path}/..">..</a></td>
                <td>{ext}</td>
                <td><a href="../{path}" title="{name}">{ebook_title} ● {ebook_authors}</a></td>
                <td>{size}</td>
            </tr> 
        """

        # Pager links: plain link for other pages, bold for the current one.
        self.page_tpl = """
        <span><a href="?page={page}&page_size={page_size}&name={query_str}{url_params}">{page}</a></span>
        """
        self.cur_page_tpl = """
        <span><b alt="?page={page}&page_size={page_size}&name={query_str}{url_params}">{page}</b></span>
        """

    def format_search(self, results, query_str, page_size, page_no, word, sort=False, uniq=False):
        """Render ``(total_count, rows)`` from DirIndex.search_file as HTML.

        :param results: ``(total_count, rows)`` tuple.
        :param query_str: query echoed back into the form and pager links.
        :param page_size: results per page; <= 0 disables the pager.
        :param page_no: current 1-based page number.
        :param word/sort/uniq: checkbox states echoed into the form.

        Bugfix: *sort* and *uniq* now default to False so the older
        5-argument callers (``cgi()``/``test()``) keep working.
        """
        total_count = results[0]
        items = results[1]
        row_item_html = []
        pages_html = []
        total_pages = 1

        def g_file_size(size):
            # human-readable size: KB below 1MB, MB otherwise
            val = size / 1024.0
            if val < 1024:
                return "%.1fKB"%(val)
            val = val/1024.0
            return "%.1fMB"%(val)

        for item in items:
            # fall back to the file name when the ebook title is empty
            tr = self.row_tpl.format(path=item[0], name=item[1],
                                     ext=item[2], size=g_file_size(item[3]),
                                     ebook_title=item[1] if item[4] == "" else item[4], ebook_authors=item[5])
            row_item_html.append(tr)
        url_params = "%s%s%s#sss"%(
            "&word="if word else "", 
            "&sort=" if sort else "",
            "&uniq=" if uniq else "")

        if page_size > 0:
            total_pages = int(total_count / page_size)
            if total_count % page_size != 0:
                total_pages += 1
            # show up to 7 pages centered-ish on the current page
            start_page = page_no - 3 if page_no > 3 else 1
            end_page = start_page + 7
            if end_page > total_pages:
                end_page = total_pages
            for i in range(start_page, end_page+1, 1):
                tpl = self.page_tpl
                if i == page_no:
                    tpl = self.cur_page_tpl
                tt = tpl.format(
                    page=i, page_size=page_size,
                    query_str=query_str,
                    url_params=url_params,
                    )
                pages_html.append(tt)

        content = self.html_tpl.format(
            query_str=query_str,
            url_params=url_params,
            word_checked="checked" if word else "",
            sort_checked="checked" if sort else "",
            uniq_checked="checked" if uniq else "",
            rows="".join(row_item_html),
            total_count="%d/%d"%(page_size*page_no, total_count),
            page_size=page_size,
            last_page=total_pages,
            next_page=page_no+1,
            pre_page=1 if page_no <= 1 else page_no-1,
            pages="".join(pages_html),
            page_no=page_no
        )
        return content

def test():
    """Ad-hoc smoke test: index a sample directory and print the HTML page.

    Not wired to the CLI; left for manual debugging.
    """
    query_name = "filex"
    page_size = -1
    page_no = 0
    word = True

    dinx = DirIndex(":memory:", debug=False)
    dinx.generate_database("/home/long/work/sample", None)
    res = dinx.search_file(query_name, full_word=word, page_size=page_size, page_no=page_no)
    dih = DirIndexHtml()
    # bugfix: format_search also takes sort/uniq flags; calling with only
    # five arguments raised TypeError
    html_res = dih.format_search(res, query_name, page_size, page_no, word, False, False)
    print(html_res)
# test()

def get_db_file():
    """Return ``(db_file, root_dir)``: the sqlite database path inside a
    ``.run`` directory next to this script (created on demand) and the
    script's own directory."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    run_dir = os.path.join(script_dir, ".run")
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    database_path = os.path.join(run_dir, os.path.basename(__file__) + ".db")
    return database_path, script_dir

def cgi():
    """CGI entry point: run a search from the query string and print HTML.

    NOTE(review): the stdlib ``cgi`` module is deprecated (PEP 594) and
    removed in python 3.13 -- this entry point only works on older pythons.
    """
    # cgitb is imported for its side-effect availability when debugging
    import cgi, cgitb 
    form = cgi.FieldStorage() 
    # Bugfix: form values arrive as strings (or None); the original passed
    # them straight into search_file where they are compared to ints.
    query_name = form.getvalue('name') or ''
    try:
        page_size = int(form.getvalue("page_size") or PAGE_SIZE)
    except ValueError:
        page_size = PAGE_SIZE
    try:
        page_no = int(form.getvalue("page") or 1)
    except ValueError:
        page_no = 1
    # checkbox param: presence means True
    word = form.getvalue("word") is not None

    db_file, root_dir = get_db_file()

    dinx = DirIndex(db_file, debug=False)
    res = dinx.search_file(query_name, full_word=word, page_size=page_size, page_no=page_no)
    dih = DirIndexHtml()
    # bugfix: format_search also takes sort/uniq flags
    html_res = dih.format_search(res, query_name, page_size, page_no, word, False, False)

    print("Content-type:text/html\n\n\n")
    print(html_res)

def scan():
    """Index every file under the script's directory into the database,
    honoring patterns listed in ``flist.ignore`` next to the script."""
    db_path, script_root = get_db_file()
    indexer = DirIndex(db_path, debug=False)
    indexer.generate_database(script_root, os.path.join(script_root, "flist.ignore"))

def clean():
    """Remove index rows whose files no longer exist on disk."""
    db_path, script_root = get_db_file()
    DirIndex(db_path, debug=False).clean(script_root)

def query(query_str):
    """Run a search from the command line and print matching file paths."""
    db_path, _script_root = get_db_file()
    indexer = DirIndex(db_path, debug=False)
    total, rows = indexer.search_file(query_str)
    print("total count: %d\n"%(total))
    for row in rows:
        print(row[0])

PAGE_SIZE = 50  # default number of results per page
def flask_web():
    """Serve the search UI with flask.

    ``sys.argv[2]`` may hold "addr:port"; otherwise flask's defaults apply.
    Routes: ``/`` search page, ``/update`` clean + rescan.
    """
    import flask
    app = flask.Flask(__name__)

    @app.route('/update')
    def update():
        # clean stale rows, then rescan the disk; report elapsed time
        try:
            start_time = time.time()
            clean()
            scan()
            end_time = time.time()
            return "update finish with out error, total %.1fs"%(end_time-start_time)
        except Exception as e:
            db_file, root_dir = get_db_file()
            # bugfix: the format string had two placeholders but only one
            # value, so the error handler itself raised TypeError
            return "update error: %s %s"%(repr(e), db_file)

    @app.route('/')
    def search():
        try:
            def get_input(key, default, convert=None):
                # query-string value first; a form field overrides it
                val = flask.request.args.get(key)
                val = flask.request.form.get(key, val)
                if convert is not None and val is not None:
                    val = convert(val)
                if val is None:
                    val = default
                return val
            query_name = get_input('name', '')
            page_size = get_input("page_size", PAGE_SIZE, int)
            page_no = get_input("page", 1, int)
            if page_no <= 0:
                page_no = 1
            # checkbox params: mere presence means True
            word = get_input("word", False, lambda x: x is not None)
            sort = get_input("sort", False, lambda x: x is not None)
            uniq = get_input("uniq", False, lambda x: x is not None)

            db_file, root_dir = get_db_file()

            dinx = DirIndex(db_file, debug=False)
            res = dinx.search_file(query_name, full_word=word, 
                page_size=page_size, page_no=page_no, sort=sort, uniq=uniq)
            dih = DirIndexHtml()
            html_res = dih.format_search(res, query_name,
                            page_size, page_no, word, sort, uniq)
            return html_res
        except Exception as e:
            # surface the error in the browser rather than a blank 500
            return repr(e)

    if len(sys.argv) == 3:
        addr, port = sys.argv[2].split(":")
        app.run(host=addr, port=int(port))
    else:
        app.run()

def help():
    """Print command-line usage for this script (name taken from argv[0])."""
    usage_str = """
    usage: 
        {boot_name} flask [addr:port]
        {boot_name} scan
        {boot_name} query query_str
        {boot_name} clean
    """.format(boot_name=sys.argv[0])
    print(usage_str)

if __name__ == '__main__':
    # test()
    # Dispatch on the first command-line argument; a missing argument raises
    # IndexError from sys.argv, which falls through to the usage message.
    try:
        if sys.argv[1] == "flask":
            flask_web()
        elif sys.argv[1] == "scan":
            # scan(sys.argv[2])
            scan()
        elif sys.argv[1] == "clean":
            clean()
        elif sys.argv[1] == "query":
            query(sys.argv[2])
    except IndexError:
        # too few arguments: show usage, then re-raise for a nonzero exit
        help()
        raise
    except:
        # everything else propagates unchanged (full traceback on stderr)
        raise
    