#!/usr/bin/env python3

import codecs
import os
import os.path
import json
import textwrap
from sys import exit
from io import StringIO
from bs4 import BeautifulSoup
import requests
import click
import tldextract
from urllib.parse import urlparse
import logging
from http.client import HTTPConnection  # py3

# Wire-level HTTP debugging: capture urllib3's internal logging (used by
# `requests`) so every request/response is visible on the console.
log = logging.getLogger('urllib3')
log.setLevel(logging.DEBUG)

# logging from urllib3 to console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log.addHandler(ch)

# print statements from `http.client.HTTPConnection` to console/stdout
# (debuglevel > 0 makes http.client print raw request/response data)
HTTPConnection.debuglevel = 1


# Separator rendered between posts in extracted output ('----------').
SEPLINE = '--' * 5
# Target column width for text output (currently unused in this file).
TEXTWIDTH = 100


@click.group()
def cli():
    # Root click command group; subcommands register via @cli.command().
    # Deliberately no docstring: click would surface it as --help text.
    pass


def load_apikey_json():
    """Load stored API credentials from ``apikeys.kys`` in the working dir.

    Returns:
        dict: mapping of domain -> [username, api_key]; an empty dict when
        the file does not exist or is not a regular file (first run).
    """
    # os.path.isfile() already implies existence, so the original's extra
    # exists() check was redundant.  Use a context manager: the original
    # opened the file and never closed it.
    if os.path.isfile('apikeys.kys'):
        with open('apikeys.kys') as fp:
            return json.load(fp)
    return dict()


def save_apikey_json(keydict):
    """Persist *keydict* (domain -> [username, api_key]) to ``apikeys.kys``.

    Overwrites any existing file.  The original leaked the handle if the
    write raised; a context manager guarantees closure.
    """
    with open('apikeys.kys', 'w') as fp:
        json.dump(keydict, fp)


def extract_html(title, posts, storymode=True):
    """Assemble an HTML document from a list of Discourse post dicts.

    The first post's 'username' is taken as the thread author.  With
    storymode=True only that author's posts are kept (story extraction);
    otherwise every post is included with author/date metadata.
    Returns the concatenated HTML as a single string.
    """
    # TODO: Add options for saving entire thread
    opener = posts[0]['username']
    pieces = [f'<pre> Title: {title}\n Author: {opener}\n </pre>']
    if storymode:
        # Keep only posts written by the thread opener.
        for post in posts:
            if post['username'] == opener:
                pieces.append(post['cooked'])
                pieces.append('\n<br/>' + SEPLINE + '<br/>\n')
    else:
        # Full-thread mode: prefix each post with its author and timestamp.
        for post in posts:
            post_author = post['name']
            date = post['created_at']
            pieces.append(f"Author: {post_author}\n<br/> Date: {date}<br/>\n")
            pieces.append(post['cooked'])
            pieces.append('\n<br/>' + SEPLINE + '\n<br/>')
    return ''.join(pieces)


def textify(html):
    """Return the plain-text content of *html*, markup stripped via lxml."""
    # TODO: Make text more pretty on the page
    return BeautifulSoup(html, features="lxml").text



@cli.command()
def add_api_key():
    # Interactively store a [username, api_key] pair for a Discourse domain.
    # (Comment, not docstring, to keep --help output unchanged for now.)
    keys = load_apikey_json()
    # Bug fix: click.prompt appends ': ' itself, so the original trailing
    # ': ' rendered as "Enter Domain of Discourse instance: : ".
    dmn = click.prompt("Enter Domain of Discourse instance")
    un = click.prompt("Enter Username")
    k = click.prompt("Enter API Key")
    authtoken = [un, k]
    # Only overwrite an existing credential after explicit confirmation.
    if dmn not in keys or click.confirm("Key exists change?"):
        keys[dmn] = authtoken
    save_apikey_json(keys)


@cli.command()
@click.argument('uri')
@click.option('--story/--no-story', default=True)
def save(uri, story):
    """Fetch a Discourse topic at URI and page its plain text.

    With --story (default) only the opening author's posts are kept;
    --no-story keeps every post with author/date metadata.  Exits with a
    non-zero status on network errors, non-200 responses, or unexpected
    payloads.
    """
    apikeys = load_apikey_json()
    dmn = urlparse(uri).netloc
    # Bug fix: the original unconditionally read apikeys[dmn][0], which
    # raised KeyError for domains without stored credentials and made the
    # anonymous-mode user agent below dead code.
    creds = apikeys.get(dmn)
    uname = creds[0] if creds else None
    uastring = "Mozilla/5.0 (compatible; ThreadExtractor (Anonymous mode) 0.1 +https://notabug.org/Brownowl/discourse-thread-extract"
    if uname:
        uastring = f'Mozilla/5.0 (compatible; ThreadExtractor 0.1 +{dmn}/u/{uname}'
    headers = {'user-agent': uastring, 'Content-type': 'application/json'}
    if creds:
        # Removed the original debug log.error() here: it wrote the API key
        # to the console at ERROR level (secret leak) and crashed for
        # domains missing from the key store.
        headers['Api-Key'] = creds[1]
        headers['Api-Username'] = uname

    # here we add a .json to the uri provided if it's a valid discourse url
    # that gets us a 200 and if post_stream exists in response we procede otherwise
    # exit. Rather impolite way of validating your input but all the other ways suck for
    # various reasons
    apiuri = uri if uri.endswith('.json') else uri + '.json'

    try:
        iresponse = requests.get(
            apiuri, headers=headers, params={'print': True})
        click.echo(iresponse.url)
    except requests.RequestException as e:
        click.echo("Network Error: " + str(e))
        exit(2)

    if iresponse.status_code != 200:
        click.echo("HTTP Error\n")
        exit(iresponse.status_code)

    nfo = iresponse.json()
    if 'post_stream' not in nfo:
        click.echo("Data parsing error.\n Check url and try again\n")
        exit(1)

    post_stream = nfo['post_stream']
    # Discourse returns at most ~1000 posts inline; when 'posts' covers the
    # whole 'stream' we have the complete thread in one response.
    if len(post_stream['posts']) == len(post_stream['stream']):
        h = extract_html(nfo['title'], post_stream['posts'], story)
        t = textify(h)
        click.echo_via_pager(t)
    else:
        # TODO: Threads of more than 1000 posts need to be dealt with
        # But that's for another session
        click.echo("Long thread logic goes here")
        exit(3)
@cli.command()
@click.argument('uri')
@click.argument('terms')
def search(uri, terms):
    """Search a Discourse instance; URI must end with /search.json.

    Prints a blurb plus topic link for every hit.  Requires stored API
    credentials for the target domain (see add-api-key); exits non-zero on
    missing credentials, an invalid URI, or an HTTP error.
    """
    apikeys = load_apikey_json()
    dmn = urlparse(uri).netloc
    # Bug fix: look up credentials only after the membership check.  The
    # original read apikeys[dmn][0] first, so unknown domains crashed with
    # KeyError instead of reaching the graceful exit(3) below.
    if dmn not in apikeys:
        log.error("Headers")
        exit(3)
    uname = apikeys[dmn][0]
    uastring = "Mozilla/5.0 (compatible; ThreadExtractor (Anonymous mode) 0.1 +https://notabug.org/Brownowl/discourse-thread-extract"
    if uname:
        uastring = f'Mozilla/5.0 (compatible; ThreadExtractor 0.1 +{dmn}/u/{uname}'
    headers = {'user-agent': uastring, 'Content-type': 'application/json'}
    headers['Api-Key'] = apikeys[dmn][1]
    headers['Api-Username'] = uname
    if not uri.endswith("/search.json"):
        print("Invalid search uri")
        exit(2)
    iresponse = requests.get(uri, headers=headers, params={"q": terms})
    if iresponse.status_code != 200:
        print("Http Error")
        print(iresponse.status_code)
        exit(3)

    nfo = iresponse.json()
    # dmn is the same netloc the original re-parsed here for the links.
    for post in nfo['posts']:
        blurb = post['blurb']
        tid = post['topic_id']
        res = f'{blurb}\n' + f'https://{dmn}/t/{tid}' + '\n' + SEPLINE + ' \n'
        print(res)

    exit(0)



# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
