"""Message handlers."""
import argparse
from collections import Counter
from datetime import timedelta
import os
import re

from cki_lib import metrics
from cki_lib import misc
from cki_lib.logger import get_logger
from cki_lib.messagequeue import MessageQueue
from dateutil.parser import parse as date_parse
import sentry_sdk

from . import slack

LOGGER = get_logger('cki_tools.slack_bot.amqp')


def last_stage_run(jobs) -> str | None:
    """Return the stage of the last run job."""
    sorted_jobs = sorted(jobs, reverse=True, key=lambda k: k['id'])
    return next((x['stage'] for x in sorted_jobs if x['status'] != 'skipped'), None)


def handle_sentry(hook_json):
    """Handle a Sentry alert.

    Only 'triggered' alerts that carry an event are reported; everything
    else produces no messages.
    """
    if 'event' not in hook_json['data'] or hook_json['action'] != 'triggered':
        return []

    event = hook_json['data']['event']
    # The project slug is the path component right before '/events/' in the URL.
    project_name = re.sub('.*/([^/]+)/events/.*', r'\1', event['url'])
    return [f"😩 {project_name}: {event['title']} - <{event['web_url']}|sentry>"]


def handle_pipeline(hook_json):
    # pylint: disable=too-many-branches
    """Handle a pipeline webhook.

    Returns a list of Slack message lines; empty when the pipeline is not
    worth reporting.
    """
    messages = []

    attributes = hook_json['object_attributes']
    pipeline_id = attributes['id']
    pipeline_status = attributes['status']
    trigger_vars = {x['key']: x['value'] for x in attributes['variables']}

    # Deployment pipelines can request a custom message per final status.
    if log_message := trigger_vars.get(f'deployment_bot_message_{pipeline_status}'):
        messages.append(log_message)

    # For the following, only care about 'pipeline' repositories, by lack of a
    # better way identified by having 'pipelines' in their full path.
    if 'pipelines' not in misc.get_nested_key(hook_json, 'project/path_with_namespace', ''):
        return messages

    # Only log failed pipelines. Too many notifications cause us to miss
    # important messages. There's no action for us to take for the other
    # statuses.
    if pipeline_status != 'failed':
        return messages

    # Ignore stages that in 99% of cases fail for valid reasons and only
    # report stages that usually mean an infra failure. We ignore the
    # messages about valid looking failures anyways and unless someone
    # pings us or we get a sentry error or similar, don't look into them.
    jobs = hook_json['builds']
    if jobs and last_stage_run(jobs) in ('merge', 'build', 'build-tools', 'kernel-results'):
        return messages

    # Skip retriggers and Brew scratch builds.
    if ((trigger_vars.get('CKI_DEPLOYMENT_ENVIRONMENT', 'production') != 'production') or
            misc.strtobool(trigger_vars.get('scratch', 'false'))):
        return messages

    # yaml error, pipeline could not be created and no variables are present;
    # use a hack to try to detect retriggered pipelines in that case.
    # This does not work for pipelines without a commit.
    commit_message = misc.get_nested_key(hook_json, 'commit/message', '')
    if not trigger_vars and 'retrigger' in commit_message:
        return messages

    pipeline_url = f"{hook_json['project']['web_url']}/pipelines/{pipeline_id}"
    msg = f"<{pipeline_url}|P{pipeline_id}>: {pipeline_status.upper()}"
    msg += f" [{''.join(_stage_letters(hook_json))}]"

    # If the pipeline duration is set, append it in pretty h:mm:ss form.
    if duration := attributes.get('duration'):
        msg += f" `{timedelta(seconds=int(duration))}`"

    # Add on the pipeline title; a missing title means the pipeline crashed
    # before any jobs were created.
    msg += f" {trigger_vars.get('title', 'Pipeline crash, no jobs created')} "

    messages += msg.split('\n')

    return messages


def _stage_letters(hook_json):
    """Return one Slack-decorated initial per non-review pipeline stage.

    For each stage, only the newest job per job name is considered so that
    retried jobs don't count twice. The stage initial is bold (*X*) when a
    job failed, italic (_X_) while a job is still running, and plain
    otherwise (all skipped/successful jobs, or an empty stage).
    """
    letters = []
    for stage in hook_json['object_attributes']['stages']:
        if stage == 'review':
            continue

        # Newest first; skipped jobs never influence the stage color.
        jobs_in_stage = sorted(
            (x for x in hook_json['builds']
             if x['stage'] == stage and x['status'] != 'skipped'),
            reverse=True,
            key=lambda k: date_parse(k['created_at']),
        )
        # Keep only the most recent job per name (drop older retries).
        newest_jobs = {}
        for job in jobs_in_stage:
            newest_jobs.setdefault(job['name'], job)

        status_count = Counter(job['status'] for job in newest_jobs.values())
        if status_count['failed']:
            color = '*'
        elif status_count['running']:
            color = '_'
        else:
            # All remaining statuses (success, empty stage, ...) render plain;
            # the original skipped/success/else branches were all equivalent.
            color = ''

        letters.append(f'{color}{stage[0].upper()}{color}')
    return letters


def handle_issue(hook_json):
    """Handle an issue webhook.

    Only issue open/close actions are reported; other actions (updates,
    reopens, ...) produce no messages.
    """
    attributes = hook_json['object_attributes']
    if attributes['action'] not in ('open', 'close'):
        return []

    link = (f"<{attributes['url']}|"
            f"*{hook_json['project']['path_with_namespace']}#{attributes['iid']}*>")
    return [f"{link} {hook_json['user']['username']} "
            f"[{attributes['state'].upper()}] {attributes['title']}"]


def handle_gitlab(body):
    """Process a GitLab webhook message."""
    if body.get('object_kind') == 'pipeline':
        return handle_pipeline(body)
    if body.get('object_kind') == 'issue':
        return handle_issue(body)
    return []


def handle_chatbot(body):
    """Process a chatbot message.

    Returns the message text as a single-element list, or an empty list when
    no (non-empty) message is present so that no bogus `None` line is ever
    forwarded to Slack.
    """
    message = body.get('message')
    return [message] if message else []


def process_message(body=None, headers=None, routing_key=None, **_):
    """Process a webhook message.

    Dispatch on the 'message-type' AMQP header, send each resulting line to
    the Slack channel and return the lines (useful for testing). Messages
    with an unknown or missing type are silently ignored.

    The routing_key parameter is accepted for compatibility with the
    message-queue callback signature but is currently unused.
    """
    message_type = (headers or {}).get('message-type')
    if message_type == 'gitlab':
        lines = handle_gitlab(body)
    elif message_type == 'sentry':
        lines = handle_sentry(body)
    elif message_type == 'chatbot':
        lines = handle_chatbot(body)
    else:
        lines = []
    for line in lines:
        slack.send_message(line)
    return lines


def main(args: list[str] | None = None) -> None:
    """Run main loop."""
    # No options supported yet; still reject unexpected arguments.
    argparse.ArgumentParser().parse_args(args)

    exchange = os.environ.get('WEBHOOK_RECEIVER_EXCHANGE', 'cki.exchange.webhooks')
    routing_keys = os.environ['SLACK_BOT_ROUTING_KEYS'].split()

    metrics.prometheus_init()

    queue = MessageQueue()
    queue.consume_messages(
        exchange,
        routing_keys,
        process_message,
        queue_name=os.environ.get('SLACK_BOT_QUEUE'),
    )


if __name__ == "__main__":
    # Initialize Sentry error reporting via the shared cki_lib helper before
    # entering the consume loop.
    misc.sentry_init(sentry_sdk)
    main()
