from .UrlsTableInfo import UrlsTableInfo
from .Sqlite3Parser import Sqlite3Parser


class RecoverUrls:
    """
    Recover browser url history records from a Chrome ``History`` SQLite file.

    1. When recovering from a file, only the deleted free-block regions are
       recovered.
    2. When recovering from raw bytes, recover as many url records as can be
       located.

    NOTE(review): reading the rootpage and SQL of the ``urls`` table directly
    from raw bytes is not implemented yet; walking every valid cell of page 1
    should yield that information (i.e. obtain the urls table metadata
    without running SQL).
    """

    def __init__(self):
        # Addresses of free blocks found in pages belonging to the urls table.
        self.free_blocks = []
        # Parser over the raw database bytes; assigned in recover().
        # (Was initialized as a list, which is the wrong type — use None.)
        self.db_parser = None
        # Metadata (rootpage, sql, ...) of the urls table; assigned in recover().
        self.urls_table_info = None

    def recover(self, file: str = None, data: bytes = None) -> list:
        """Parse the database and recover url records.

        Either *file* (path to a SQLite History file) or *data* (its raw
        bytes) must be supplied; *data* takes precedence when both are given.

        Returns the list of free-block addresses found in the urls table's
        pages, or an empty list when no input was supplied.
        """
        if not file and not data:
            return []
        if not data and file:
            data = self.read_sqlite(file)

        self.db_parser = Sqlite3Parser(data)
        self.urls_table_info = UrlsTableInfo(file, data)

        free_blocks = self.search_free_blocks(data)

        # Walk every page of the urls table and parse its regular cells.
        pageend = self.db_parser.parse_urls_page_end(
            self.db_parser.page_address(self.urls_table_info.rootpage))
        for pageindex in range(self.urls_table_info.rootpage, pageend + 1):
            self.db_parser.parse_normal_block(pageindex, self.urls_table_info)

        # BUG FIX: the method is annotated "-> list" but previously returned
        # None on this path; return the collected free-block addresses.
        return free_blocks

    def search_free_blocks(self, data):
        """Collect free-block addresses from every page of the urls table.

        *data* is kept for interface compatibility but is unused here — the
        parser already holds the database bytes.

        Raises ValueError when the rootpage lies outside the valid page range.
        """
        blocks = []
        # Page 1 is the database header/schema page, so a table rootpage must
        # be at least 2 and within the page count.
        if self.urls_table_info.rootpage not in range(2, self.db_parser.pagecount):
            # BUG FIX: raising a plain string is a TypeError in Python 3;
            # raise a proper exception instead.
            raise ValueError('wrong urls table rootpage')
        rootpage_address = self.db_parser.page_address(self.urls_table_info.rootpage)

        urls_table_end = self.db_parser.parse_urls_page_end(rootpage_address)

        for pageindex in range(self.urls_table_info.rootpage, urls_table_end + 1):
            blocks += self.db_parser.parse_page_freeblocks(pageindex)

        return blocks

    def read_sqlite(self, file):
        """Read the whole SQLite file into memory and return its bytes."""
        with open(file, 'rb') as f:
            return f.read()


if __name__ == '__main__':
    # Recover deleted url records from the sample History database.
    RecoverUrls().recover('../data/History')
