import datetime
import re
import time
import urllib
import urllib.parse

from .Sqlite3Common import DEFAULT_PAGE_SIZE, url_parttern
from .Sqlite3PageParser import Sqlite3PageParser, Sqlite3PageSearch


class RecoverUrl:
    """Recover browser-history URL records from raw SQLite3 page bytes.

    Intended for carved/partial data (e.g. disk-image chunks): it locates
    SQLite leaf pages in an arbitrary byte blob, parses their cells, and
    keeps rows that look like (url, title, visit-timestamp) entries from a
    browser history table such as Firefox ``moz_places``.
    """

    def __init__(self) -> None:
        # Offset of the last leaf page whose body ran past the end of the
        # chunk passed to detect(); callers can use it to carry the partial
        # page over into the next chunk.  (Name kept for compatibility.)
        self.lastest_pos = None

    def detect(self, data):
        """Scan ``data`` for SQLite leaf pages and extract URL records.

        :param data: raw bytes that may contain SQLite page data
        :return: list of record dicts from :meth:`filter`; empty list when
            nothing usable is found (previously ``None`` in the no-pages
            case — normalized to ``[]`` for a consistent return type)
        """
        pages = Sqlite3PageSearch().search(data)
        if not pages:
            return []
        results = []
        for page_pos in pages:
            parser = Sqlite3PageParser()
            if not parser.parse_leafpage(data[page_pos:page_pos + DEFAULT_PAGE_SIZE]):
                continue

            cur_page_size = parser.reset_page_size()
            # Only part of this leaf page is present at the tail of the
            # current chunk; remember its offset so the caller can carry the
            # tail into the next chunk instead of losing the page.
            if page_pos + cur_page_size > len(data):
                self.lastest_pos = page_pos
                continue
            if cur_page_size != DEFAULT_PAGE_SIZE:
                parser.reset_pagedata(data[page_pos:page_pos + cur_page_size])

            records = self.filter(parser.parse_cells())
            if records:
                results += records
        return results

    def filter(self, records, url_pattern=None):
        """Keep records that contain one URL-formatted string column and one
        integer column that plausibly is a visit timestamp.

        [
            [None, 'https://..../', 723810419731],
        ]
        >>>
        [
            {
                'title': 'test',
                'url': 'https://..../',
                'visiteddate': '2021-01-01 00:00:00'
            }
        ]

        :param records: iterable of row value lists (as parsed from cells)
        :param url_pattern: regex used to recognize a URL column; defaults
            to the module-level ``url_parttern``
        :return: list of dicts with ``url`` and optionally ``title`` /
            ``visiteddate`` keys
        """
        pattern = url_parttern if url_pattern is None else url_pattern
        res = []
        for record in records:
            item = {}
            hit_title = False  # when True, the next string column is the title
            # BUG FIX: iterate with enumerate() for the element's position.
            # The previous record.index(e) returns the position of the
            # *first* equal value, which is wrong when values repeat in a
            # row (and is O(n) per element).
            for idx, e in enumerate(record):
                if hit_title and isinstance(e, str):
                    item['title'] = urllib.parse.unquote(e)
                    hit_title = False
                elif not item and isinstance(e, str) and re.match(pattern, e) is not None:
                    item['url'] = urllib.parse.unquote(e)
                    hit_title = True
                elif self.may_time_yield(e, idx):
                    item['visiteddate'] = self.sqlite_time_to_cst(e)
                    hit_title = False
                else:
                    hit_title = False

            if item and 'url' in item:
                res.append(item)
        return res

    # moz_places last_visit_date column index = 8
    def may_time_yield(self, timestamp, index):
        """Return True when ``timestamp`` at column ``index`` plausibly is a
        browser visit time: a non-zero int of 16 or 17 decimal digits, in a
        column no later than 8 (moz_places last_visit_date position)."""
        if not timestamp or not isinstance(timestamp, int) or index > 8:
            return False

        if len(str(timestamp)) not in [16, 17]:
            return False

        return True

    def sqlite_time_to_cst(self, timestamp: int):
        """Convert a browser microsecond timestamp to a CST (UTC+8) string.

        17 digits: microseconds since 1601-01-01 (Chrome/WebKit epoch);
        16 digits: microseconds since 1970-01-01 (Firefox PRTime), rendered
        in the machine's local timezone.  Returns None for other lengths
        (callers are expected to pre-filter via :meth:`may_time_yield`).
        """
        if len(str(timestamp)) == 17:
            # The 08:00 base shifts the 1601 epoch so the formatted result
            # comes out in UTC+8 (China Standard Time).
            epoch = datetime.datetime(1601, 1, 1, 8, tzinfo=datetime.timezone.utc)
            visit_datetime = epoch + datetime.timedelta(microseconds=timestamp)
            return visit_datetime.strftime("%Y-%m-%d %H:%M:%S")
        elif len(str(timestamp)) == 16:
            x = time.localtime(timestamp / 1000000)
            return time.strftime('%Y-%m-%d %H:%M:%S', x)
