# SPDX-License-Identifier: Apache-2.0
# (C) Copyright IBM Corp. 2024.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#  http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# The collection of code-data-specific annotations and their heuristics is borrowed from:
# CodeParrot  https://github.com/huggingface/transformers/tree/main/examples/research_projects/codeparrot#preprocessing
# BigCode Dataset https://github.com/bigcode-project/bigcode-dataset/tree/main/preprocessing
#
# Code-specific heuristics such as the alphanumeric fraction, the char/token ratio, and others are taken from the
# CodeParrot and BigCode Dataset preprocessing scripts and adapted to the data-prep-kit framework.

# Key under which this transform's parameter dict is stored in the configuration
# (see CodeQualityTransform.__init__ and CodeQualityTransformConfiguration.apply_input_params).
CODE_QUALITY_PARAMS = "code_quality"
import os
import re
from argparse import ArgumentParser, Namespace
from collections import Counter

import numpy as np
import pyarrow as pa
from bs4 import BeautifulSoup
from data_processing.transform import AbstractTableTransform, TransformConfiguration
from data_processing.utils import TransformUtils, load_model


# NOTE(review): presumably disables HuggingFace tokenizers' fork-based parallelism to
# avoid warnings/deadlocks when the tokenizer is used inside worker processes — confirm.
os.environ["TOKENIZERS_PARALLELISM"] = "false"


def is_xml(data, lang):
    """
    Heuristically detect XML content: an XML declaration must appear within the
    first 100 characters. Files labeled XSLT are excluded (presumably because
    XSLT is itself XML and would always match).
    """
    return lang.lower() != "xslt" and "<?xml version=" in data[:100]


def is_html(data, lang):
    """
    Check if input data is an HTML document based on the displayed-text vs markup ratio.

    :param data: file contents
    :param lang: programming-language label for the file
    :return: True when the file is labeled "html" and its extracted display text is
        both a sizeable fraction (> 0.2) of the raw markup and longer than 100
        characters; also True when parsing fails outright (treat unparseable as HTML).
        False for any non-HTML language label.
    """
    if lang.lower() != "html":
        return False

    html = data
    try:
        soup = BeautifulSoup(html, features="html.parser")
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt /
    # SystemExit); the best-effort "unparseable counts as HTML" behavior is kept.
    except Exception:
        return True

    # Remove script and style elements so only user-visible text remains.
    for script in soup(["script", "style"]):
        script.extract()  # rip it out

    text = soup.get_text()
    ratio = (len(text) / len(html)) if len(html) > 0 else 0
    return ratio > 0.2 and len(text) > 100


# CODEPARROT FILTERS
# CODEPARROT FILTERS
def calculate_line_stats(data, lines_max=7):
    """
    Compute per-line length statistics for a file.

    :param data: file contents
    :param lines_max: how many of the longest lines to average for ``avg_longest_lines``
    :return: dict with the mean and max line length, the mean length of the
        ``lines_max`` longest lines, and the total number of lines; all zeros
        for an empty file
    """
    lengths = np.array([len(line) for line in data.splitlines()])
    num_lines = lengths.shape[0]
    if num_lines == 0:
        # Empty file: every statistic defaults to zero.
        return {
            "line_mean": 0.0,
            "line_max": 0,
            "avg_longest_lines": 0.0,
            "num_lines": 0,
        }
    top_n = min(lines_max, num_lines)
    longest = np.sort(lengths)[::-1][:top_n]
    return {
        "line_mean": np.mean(lengths),
        "line_max": np.max(lengths),
        "avg_longest_lines": np.mean(longest),
        "num_lines": num_lines,
    }


def calculate_alpha_stats(data):
    """
    Return the fraction of characters in ``data`` that are alphanumeric
    (0.0 for empty input).
    """
    if not data:
        return {"alphanum_frac": 0.0}
    return {"alphanum_frac": np.mean([ch.isalnum() for ch in data])}


def calculate_char_token_ratio(data, tokenizer) -> dict[str, float]:
    """
    Compute the character/token ratio of the file using the given tokenizer.

    :param data: file contents
    :param tokenizer: callable returning a dict with an "input_ids" sequence
    :return: dict with the ratio (0.0 when tokenization yields no tokens)
    """
    token_ids = tokenizer(data, truncation=False)["input_ids"]
    if not token_ids:
        return {"char_token_ratio": 0.0}
    return {"char_token_ratio": len(data) / len(token_ids)}


def is_autogenerated(data, scan_width=5):
    """
    Check whether a file is autogenerated by looking for telltale keywords
    in its first few lines.

    :param data: file contents
    :param scan_width: number of leading lines to scan
    :return: True when any keyword appears (case-insensitively) in the scanned lines
    """
    keywords = ("auto-generated", "autogenerated", "automatically generated")
    for line in data.splitlines()[:scan_width]:
        lowered = line.lower()
        if any(keyword in lowered for keyword in keywords):
            return True
    # The original ended with a misleading `for/else` — the loop contains no
    # `break`, so the `else` always ran; a plain return expresses "not found".
    return False


def is_config_or_test(data, scan_width=5, coeff=0.2):
    """
    Decide whether a file looks like a configuration file or a unit test.

    Two heuristics are applied in order:
    1. an explicit keyword scan over the first ``scan_width`` lines;
    2. the words "config" / "test" occur more than ``coeff`` times the
       number of newlines anywhere in the file.
    """
    lines = data.splitlines()

    # Heuristic 1: explicit markers near the top of the file.
    markers = ("unit tests", "test file", "configuration file")
    for line in lines[:scan_width]:
        lowered = line.lower()
        if any(marker in lowered for marker in markers):
            return True

    # Heuristic 2: word frequency relative to the file's newline count.
    threshold = int(coeff * data.count("\n"))
    config_hits = 0
    test_hits = 0
    for line in lines:
        lowered = line.lower()
        config_hits += lowered.count("config")
        test_hits += lowered.count("test")
        if config_hits > threshold or test_hits > threshold:
            return True
    return False


def has_no_keywords(data, language):
    """
    For Python files, check that none of the structural keywords
    (function, class, for loop, while loop) appear anywhere in the file.
    Files in any other language always return False.
    """
    if language.lower() != "python":
        return False
    structural = ("def ", "class ", "for ", "while ")
    return not any(
        keyword in line.lower() for line in data.splitlines() for keyword in structural
    )


def has_few_assignments(data, language, minimum=4):
    """
    Check whether the file uses the '=' symbol at most ``minimum`` times.

    Only applied to a fixed set of languages; any other language returns False.
    Note the comparison is ``counter > minimum``, so a file with exactly
    ``minimum`` occurrences still counts as having few assignments.

    :param data: file contents
    :param language: programming-language label for the file
    :param minimum: occurrence count above which the file is considered to
        have enough assignments
    """
    # frozenset gives O(1) membership and removes the duplicate "go" entry
    # the original list carried.
    langs_to_inspect = frozenset(
        {
            "java",
            "python",
            "c",
            "c++",
            "c#",
            "go",
            "javascript",
            "ruby",
            "perl",
            "swift",
            "rust",
            "r",
            "matlab",
        }
    )

    if language.lower() not in langs_to_inspect:
        return False

    counter = 0
    for line in data.splitlines():
        # .lower() before counting "=" was a no-op and has been dropped.
        counter += line.count("=")
        if counter > minimum:
            return False
    return True


# metrics inspired from OLMOE https://huggingface.co/datasets/allenai/OLMoE-mix-0924
def top_most_frequent_word_percent(data) -> tuple[float, float]:
    # Convert to lowercase and remove punctuation
    words = re.findall(r"\b\w+\b", data.lower())

    total_words = len(words)
    if total_words == 0:
        return 0.0, 0.0

    word_counts = Counter(words)
    most_common = word_counts.most_common(2)
    _, top_word_count = most_common[0]
    top_word_percent: float = (top_word_count / total_words) * 100.0

    top_two_words_percent: float= 0.0
    if len(most_common) >= 2:
        _, second_top_word_count = most_common[1]
        top_two_words_percent = ((top_word_count + second_top_word_count) / total_words) * 100.0

    return top_word_percent, top_two_words_percent


def alphabetic_percent(data) -> float:
    """
    Return the percentage of characters in ``data`` that are alphabetic
    (0.0 for empty input).
    """
    if not data:
        return 0.0
    letters = sum(ch.isalpha() for ch in data)
    return (letters / len(data)) * 100.0


def encoded_data_stats(data):
    """
    Measure how much of ``data`` looks like encoded blobs.

    Three pattern families are scanned: long base64-like runs, sequences of
    hex byte pairs, and runs of ``\\uXXXX`` escapes.

    :param data: file contents
    :return: tuple of (length of the longest single encoded run, fraction of
        the file covered by encoded runs). Note the second element is a 0..1
        fraction despite the "percent" name used downstream.
    """
    patterns = (
        re.compile(r"[a-zA-Z0-9+/\n=]{64,}"),  # base64-like runs
        re.compile(r"(?:\b(?:0x|\\x)?[0-9a-fA-F]{2}(?:,|\b\s*)){8,}"),  # hex byte sequences
        re.compile(r"(?:\\u[0-9a-fA-F]{4}){8,}"),  # \uXXXX escape runs
    )

    total_matched_length = 0
    max_encoded_data_length = 0
    # One pass per pattern; the original repeated this loop three times verbatim.
    for regex in patterns:
        for match in regex.finditer(data):
            match_length = len(match.group())
            total_matched_length += match_length
            max_encoded_data_length = max(max_encoded_data_length, match_length)

    encoded_data_percent = (total_matched_length / len(data)) if len(data) > 0 else 0
    return max_encoded_data_length, encoded_data_percent


class CodeQualityTransform(AbstractTableTransform):
    """
    Annotates code data with quality metrics, some inspired by CodeParrot and StarCoder:
    line statistics, alphanumeric fraction, char/token ratio, autogenerated /
    config-or-test / keyword / assignment heuristics, XML/HTML detection,
    word-frequency percentages, and encoded-data statistics.
    """

    def __init__(self, config: dict):
        super().__init__(config)

        self.code_quality = config.get(CODE_QUALITY_PARAMS)
        # Fall back to the environment variable when no token was supplied.
        if not self.code_quality["hf_token"]:
            self.code_quality["hf_token"] = os.getenv("HF_READ_ACCESS_TOKEN")
        self.tokenizer = load_model(self.code_quality["tokenizer"], "tokenizer", self.code_quality["hf_token"])

    def transform(self, table: pa.Table, file_name: str = None) -> tuple[list[pa.Table], dict]:
        """
        Compute every quality metric for each row and append them as new columns.

        :param table: input table with contents and language columns
        :param file_name: unused, kept for the AbstractTableTransform interface
        :return: single-element list with the annotated table, and an empty metadata dict
        """
        contents_column = self.code_quality["contents_column_name"]
        language_column = self.code_quality["language_column_name"]
        TransformUtils.validate_columns(table, [contents_column, language_column])

        # Output column name -> list of per-row values; insertion order fixes
        # the order in which columns are appended to the table.
        metrics: dict = {
            "line_mean": [],
            "line_max": [],
            "total_num_lines": [],
            "avg_longest_lines": [],
            "alphanum_frac": [],
            "char_token_ratio": [],
            "autogenerated": [],
            "config_or_test": [],
            "has_no_keywords": [],
            "has_few_assignments": [],
            "is_xml": [],
            "is_html": [],
            "top_word_percent": [],
            "top_two_words_percent": [],
            "alpha_percent": [],
            "max_encoded_data_length": [],
            "encoded_data_percent": [],
        }

        contents = table.column(contents_column).to_pylist()
        languages = table.column(language_column).to_pylist()

        for content, language in zip(contents, languages):
            line_stats = calculate_line_stats(content)
            metrics["line_mean"].append(line_stats["line_mean"])
            metrics["line_max"].append(line_stats["line_max"])
            metrics["total_num_lines"].append(line_stats["num_lines"])
            metrics["avg_longest_lines"].append(line_stats["avg_longest_lines"])

            metrics["alphanum_frac"].append(calculate_alpha_stats(content)["alphanum_frac"])
            metrics["char_token_ratio"].append(
                calculate_char_token_ratio(content, self.tokenizer)["char_token_ratio"]
            )

            metrics["autogenerated"].append(is_autogenerated(content))
            metrics["config_or_test"].append(is_config_or_test(content))
            metrics["has_no_keywords"].append(has_no_keywords(content, language))
            metrics["has_few_assignments"].append(has_few_assignments(content, language))
            metrics["is_xml"].append(is_xml(content, language))
            metrics["is_html"].append(is_html(content, language))

            top_one, top_two = top_most_frequent_word_percent(content)
            metrics["top_word_percent"].append(top_one)
            metrics["top_two_words_percent"].append(top_two)

            metrics["alpha_percent"].append(alphabetic_percent(content))

            longest_encoded, encoded_fraction = encoded_data_stats(content)
            metrics["max_encoded_data_length"].append(longest_encoded)
            metrics["encoded_data_percent"].append(encoded_fraction)

        for column_name, values in metrics.items():
            table = TransformUtils.add_column(table=table, name=column_name, content=values)

        return [table], {}


class CodeQualityTransformConfiguration(TransformConfiguration):
    """
    Declares the CLI parameters of the code-quality transform and packages the
    parsed values under CODE_QUALITY_PARAMS for CodeQualityTransform.
    """

    def __init__(self):
        super().__init__(name="code_quality", transform_class=CodeQualityTransform)

    def add_input_params(self, parser: ArgumentParser) -> None:
        """Register the transform's command-line arguments (all optional strings)."""
        # (flag, dest, default, help) — every argument shares the same shape.
        arguments = (
            (
                "--cq_contents_column_name",
                "contents_column_name",
                "contents",
                "Name of the column holds the data to process",
            ),
            (
                "--cq_language_column_name",
                "language_column_name",
                "language",
                "Name of the column holds the programming language details.",
            ),
            (
                "--cq_tokenizer",
                "tokenizer",
                "codeparrot/codeparrot",
                "Name or path to the tokenizer.",
            ),
            (
                "--cq_hf_token",
                "hf_token",
                "",
                "Huggingface auth token to download and use the tokenizer.",
            ),
        )
        for flag, dest, default, help_text in arguments:
            parser.add_argument(
                flag,
                required=False,
                type=str,
                dest=dest,
                default=default,
                help=help_text,
            )

    def apply_input_params(self, args: Namespace) -> bool:
        """Copy the parsed CLI values into self.params under CODE_QUALITY_PARAMS."""
        dargs = vars(args)
        keys = ("contents_column_name", "language_column_name", "tokenizer", "hf_token")
        self.params = {CODE_QUALITY_PARAMS: {key: dargs.get(key) for key in keys}}
        return True