import traceback
from gravityassist.trunc import cut_content

def try_fix_corrupted_content(content):
    """Clean up extracted page text.

    Drops short pipe-prefixed lines (broken table fragments), replaces
    isolated unprintable characters with spaces, and discards any line
    containing more than three unprintable characters.

    Returns the cleaned text; '' when *content* is None/empty or nothing
    survives cleaning. Short surviving content (< 40 chars) gets a trailing
    corruption notice appended.
    """
    def is_unprintable(character: str) -> bool:
        # Whitespace (\n \t \r \v ...) and printable characters are fine.
        # Two explicit exceptions that must NOT count as unprintable:
        #   \x1A   - the ASCII substitute character
        #   \ufeff - zero-width no-break space (Arabic Presentation Forms-B,
        #            Unicode 1.1); Python does not treat it as whitespace.
        return (
            not character.isspace()
            and not character.isprintable()
            and character != "\x1A"
            and character != "\ufeff"
        )

    # trafilatura.extract (see read_url) may hand us None; treat it as empty.
    if not content:
        return ''

    result = []
    for line in content.splitlines():
        line = line.strip()
        # Short '|'-prefixed lines are usually broken table fragments.
        if line.startswith('|') and len(line) < 30:
            continue
        chars = []
        count = 0
        for c in line:
            if is_unprintable(c):
                c = ' '
                count += 1
                if count > 3:  # too corrupted -- drop the whole line
                    chars = []
                    break
            chars.append(c)
        if chars:
            result.append(''.join(chars))
    content = '\n'.join(result).strip()
    if content and len(content) < 40:
        content += '\nThis page seems to be corrupted.'
    return content

_g_page_cache = {}

def read_url(url: str, tokens_limit: int = 800):
    """Fetch *url*, extract its main text content and return it as a dict.

    Returns {'content': ...} on success (plus a 'truncation' note when the
    text was cut down to *tokens_limit* tokens; pass tokens_limit <= 0 to
    disable truncation), or {'error': ...} on any failure. Successful
    extractions are memoized in _g_page_cache.
    """
    # Tolerate URLs wrapped in angle brackets (e.g. '<https://...>').
    if url.startswith('<') and url.endswith('>'):
        url = url[1:-1]
    if url.startswith('https://www.google.com/search?q='):
        return {'error': 'Do not use the read_url function to search Google. Use the google_search function instead.'}
    if url in _g_page_cache:
        content = _g_page_cache[url]
    else:
        import trafilatura
        try:
            content = trafilatura.fetch_url(url)
        except Exception:  # report fetch failures instead of raising (was a bare except)
            traceback.print_exc()
            return {'error': 'Failed to fetch URL', 'traceback': traceback.format_exc()}
        if content is None:
            return {'error': 'Failed to fetch URL, the page may not exist or is not accessible, try visit other links instead.'}
        assert isinstance(content, str), content
        if content.startswith('%PDF-'):
            return {'error': 'PDF document is not supported to parse for now, try visit other links instead.'}
        # trafilatura.extract returns None when no main content is found;
        # normalize to '' so the cleanup/replace steps below cannot crash.
        content = trafilatura.extract(content, deduplicate=True) or ''
        content = try_fix_corrupted_content(content)
        # Strip self-references to the URL from the extracted text.
        content = content.replace(url, '')
        _g_page_cache[url] = content
    if not content:
        content = {'error': 'Failed to decode content for this page'}
    else:
        if tokens_limit > 0:
            content, truncation = cut_content(content, tokens_limit, 'gpt-3.5-turbo')
        else:
            truncation = None
        content = {'content': content}
        if truncation is not None:
            content['truncation'] = 'truncated from {} to {}'.format(*truncation)
    return content
