# Server-side utilities (服务端工具)

import os
import sqlite3
import base64
import logging
import hashlib

from network.package import JsonPackage, BytePackage
from database import *
from utils.epub import readBook

from bs4 import BeautifulSoup

# NOTE(review): `dir` shadows the builtin `dir()` and is unused in this file's
# visible code — confirm no external module imports it before renaming/removing.
dir = "books/"
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

class Controller:
    """Request handlers for the e-book server.

    Maintains an in-memory index (``self.bookList``) of all books found in
    ``bookPath``, keyed by a per-book hash (see ``readBook``), and serves
    list / upload / download / metadata / chapter-content requests.

    Each handler takes a request package and the requesting ``user``, and
    returns a ``(response_package, user)`` tuple.
    """

    def __init__(self, bookPath, tempDir):
        # bookPath: directory containing the .epub files
        # tempDir: working directory where book resources are extracted
        self.bookPath = bookPath
        self.tempDir = tempDir
        self._readBooks()

    def _readBooks(self):
        """Scan ``self.bookPath`` for .epub files and (re)build ``self.bookList``."""
        bookList = {}
        for filename in os.listdir(self.bookPath):
            if not filename.endswith('.epub'):
                continue
            # readBook returns a dict keyed by book hash; merge all entries.
            book = readBook(os.path.join(self.bookPath, filename), self.tempDir)
            bookList.update(book)

        self.bookList = bookList

    def getBookList(self, pack: JsonPackage, user):
        """Handle a ``booklist`` request.

        JsonPackage response: list<dict> with
            id: str       book ID
            filename: str file name
            title: str    book title
            authors: str  author(s)
        """
        return (self._getBookList(), user)

    def _getBookList(self):
        """Build the JsonPackage for :meth:`getBookList` from ``self.bookList``."""
        result = []
        for val in self.bookList.values():
            filename = os.path.basename(val['path'])
            logger.debug(filename)
            result.append({
                'id': val['hash'],
                'filename': filename,
                'title': val['title'],
                'authors': val['author'],
            })

        return JsonPackage(result)

    def upload(self, pack: BytePackage, user):
        """Handle a book upload.

        Request head:
            filename: str  original file name

        JsonPackage response: {"status": bool, "msg": str}
        """
        filename = pack.head["filename"]

        # The book ID is the MD5 of the first 32 KiB of the payload
        # (must match the hashing scheme used by readBook).
        bookHash = hashlib.md5(pack.data[:1024 * 32]).hexdigest()
        logger.debug(bookHash)

        if bookHash in self.bookList:
            # Duplicate upload: reject without touching the existing file.
            return (JsonPackage({"status": False, "msg": "电子书(unknown)已存在！"}), user)

        filepath = os.path.join(self.bookPath, bookHash + ".epub")
        try:
            # ``with`` guarantees the file is closed even if the write fails.
            with open(filepath, mode="wb") as f:
                f.write(pack.data)
        except OSError:
            logger.exception("upload failed: %s", filename)
            return (JsonPackage({"status": False, "msg": "文件(unknown)上传失败！"}), user)

        # Register the freshly stored book in the in-memory index.
        book = readBook(filepath, self.tempDir)
        self.bookList.update(book)

        return (JsonPackage({"status": True, "msg": ""}), user)

    def download(self, pack: JsonPackage, user):
        """Handle a book download.

        Request:
            id: str  book ID

        BytePackage response head:
            filename: str  file name
        """
        book = self.bookList[pack.data['id']]
        filepath = book['path']
        head = {'filename': os.path.basename(filepath)}
        with open(filepath, 'rb') as f:
            result = BytePackage(head, f.read())
        return (result, user)

    def getMetadata(self, pack: JsonPackage, user):
        """Handle a metadata request.

        Request:
            id: str  book ID

        JsonPackage response:
            id: str          book ID
            title: str       book title
            authors: str     author(s)
            toc: list<list>  table of contents, entries are [level, name, id]
        """
        book = self.bookList[pack.data['id']]
        metadata = {
                'id': book['hash'],
                'title': book['title'],
                'authors': book['author'],
                'toc': book['toc'],  # toc: list<[level, name, id]>
                # NOTE: 'cover' (base64 image) is intentionally disabled:
                # 'cover': base64.b64encode(book['cover']).decode()
        }
        return (JsonPackage(metadata), user)

    def getChapterContent(self, pack: JsonPackage, user):
        """Handle a chapter-content request.

        Request:
            id: str       book ID
            chapter: int  chapter index (0-based)

        JsonPackage response:
            content: str  chapter HTML
            res: dict     resource path -> base64-encoded bytes
        """
        book = self.bookList[pack.data['id']]
        chap = pack.data['chapter']
        ret = {
                "content": book['content'][chap],
                "res": {}
        }

        # Collect every <img> the chapter references and inline it as base64.
        soup = BeautifulSoup(ret['content'], features="lxml")
        srcList = [img.get('src') for img in soup.find_all('img')]
        basePath = os.path.join(self.tempDir, pack.data['id'])
        chapPath = book['resource'][chap]
        logger.info(srcList)
        for src in srcList:
            logger.debug("PATH: %s %s", chapPath, src)
            # Image src values are relative to the chapter's own directory.
            path = os.path.relpath(os.path.join(chapPath, src))
            with open(os.path.join(basePath, path), 'rb') as f:
                ret['res'][path] = base64.b64encode(f.read()).decode()

        return (JsonPackage(ret), user)
