import math
import re
import urllib
from collections import Counter

import numpy as np
import pandas as pd


pathname_depth=0
# 模拟bad_urls模块中的get_txt函数，实际使用中需替换为正确实现
def get_txt(file_path):
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read().splitlines()
    except FileNotFoundError:
        print(f"文件 {file_path} 未找到，请检查路径。")
        return []


# 判断字符x是否为数字
def is_digit(x):
    return 48 <= ord(x) <= 57


# 判断字符x是否为字母
def is_letter(x):
    return (97 <= ord(x) <= 122) or (65 <= ord(x) <= 90)


# 判断是否既不是数字也不是字母的特殊字符
def is_special_ch(x):
    return not (is_letter(x) or is_digit(x))


# 判断是否含有数字
def contain_dig(s):
    return any(is_digit(x) for x in s)


# 判断是否含有英文字母
def contain_letter(s):
    return any(is_letter(x) for x in s)


# 定义一个函数，用于计算字符串的熵
def calculate_entropy(string):
    str_list = list(string)
    n = len(str_list)
    str_list_single = list(set(str_list))
    num_list = [str_list.count(i) for i in str_list_single]
    entropy = sum(-(float(num / n)) * math.log(float(num / n), 2) for num in num_list)
    if len(str(entropy).split('.')[-1]) >= 7:
        return ('%.7f' % entropy)
    return entropy


# 定义一个函数，用于获取字符串列表的长度
def getLength_std(str_list):
    if not str_list:
        return (0, 0)
    str_list_len = sum(len(s) for s in str_list)
    str_list_len_count = [len(s) for s in str_list]
    str_arr = np.array(str_list_len_count)
    str_list_std = str_arr.std()
    return (str_list_len, str_list_std)


def extract_all_features(url):
    if '?' not in url:
        url+='?'

    original_url = url
    url = url.lower()
    url_len = len(url)

    # 去除协议和www前缀
    for prefix in ['http://', 'https://', 'www']:
        if prefix in url and url.find(prefix) < url.find('/'):
            url = url.replace(prefix, '')

    url_letter_ratio = 0
    url_dig_ratio = 0
    url_ch_kind_n = 0
    url_ch_n = 0
    url_depth = 0
    url_point_n = 0
    url_contain_at = 0

    ch_list = []
    letter_num = 0
    dig_num = 0
    for i in range(len(url)):
        if is_letter(url[i]):
            letter_num += 1
        elif is_digit(url[i]):
            dig_num += 1
        elif url[i] == '/':
            url_depth += 1
        elif url[i] == '.':
            url_point_n += 1
        elif url[i] == '@':
            url_contain_at += 1
        else:
            ch_list.append(url[i])

    url_letter_ratio = letter_num / url_len
    url_dig_ratio = dig_num / url_len
    url_ch_n = len(ch_list)
    url_ch_kind_n = len(set(ch_list))

    url_port = 80
    parts = url.split('/')
    first_part = parts[0]
    hostname = first_part.split('.')

    if not contain_dig(hostname[-1]):
        tld = hostname[-1].split(':')
        url_TLD_temp = tld[0]
        if len(tld) == 2 and isinstance(tld[1], int):
            url_port = tld[1]
        common_tlds = []
        url_TLD = 500
        for i in range(len(common_tlds)):
            if common_tlds[i] == url_TLD_temp:
                url_TLD = i
                break
    else:
        url_TLD = 1000

    url_badword_n = 0
    badwords = get_txt('./data/badwords.txt')
    for badword in badwords:
        if badword in url:
            url_badword_n += 1

    url_popular_n = 0
    popular_web_words = get_txt('./data/popular_web.txt')
    for popular_word in popular_web_words:
        if popular_word in url:
            url_popular_n += 1

    url_exe_n = 0
    for keyword in ['.exe', '.php']:
        if keyword in url:
            url_exe_n += 1

    url_http_n = 0
    for keyword in ['http', 'www']:
        if keyword in url and url.find(keyword) > 0:
            url_http_n += 1

    # hostname 相关特征
    host_tail = url.find('/')
    url_part = url[:host_tail] if host_tail != -1 else url

    hostname_ch_n = 0
    hostname_letter_num = 0
    hostname_dig_num = 0
    hostname_point_n = 0
    hostname_is_ip = 1

    if contain_letter(url_part):
        hostname_is_ip = 0

    for i in range(len(url_part)):
        if is_letter(url_part[i]):
            hostname_letter_num += 1
        elif is_digit(url_part[i]):
            hostname_dig_num += 1
        elif url_part[i] == '.':
            hostname_point_n += 1
        else:
            hostname_ch_n += 1

    hostname_dig_ratio = hostname_dig_num / len(url_part)
    hostname_letter_ratio = hostname_letter_num / len(url_part)

    hostname = re.split('[-_/&.()<>^@!#$*=+~:; ]', url_part)
    hostname = [token for token in hostname if token]
    hostname_entropy = calculate_entropy(url_part)

    # pathname、search、hash 相关特征
    pathname = []
    search = []
    hash = []
    pathname_depth=0
    if host_tail != -1 and host_tail != len(url) - 1:
        remains = url[host_tail + 1:]
        pathname_tail = remains.find('?')
        if pathname_tail != -1:
            ch_list = []
            pathname_part = remains[:pathname_tail]
            for ch_in_path in pathname_part:
                if ch_in_path == '/':
                    pathname_depth += 1
                elif is_special_ch(ch_in_path):
                    ch_list.append(ch_in_path)
            pathname_ch_kind = len(set(ch_list))

            pathname = re.split('[-_/&.()<>^@!#$*=+~:; ]', pathname_part)
            pathname = [token for token in pathname if token]

            pathname_longest_token = max([len(token) for token in pathname], default=0)

            search_and_hash = remains[pathname_tail + 1:]
            search_tail = search_and_hash.find('#')
            if search_tail != -1:
                search = search_and_hash[:search_tail]
            else:
                search = search_and_hash
            search_and_n = search.count('&')
            search = re.split('[-_/&.()<>^@!#$*=+~:; ]', search)
            search = [token for token in search if token]

            if search_tail != -1:
                hash.append(search_and_hash[search_tail + 1:])
        else:
            ch_list = []
            pathname_part = remains if remains else '/'
            for ch_in_path in pathname_part:
                if ch_in_path == '/':
                    pathname_depth += 1
                elif is_special_ch(ch_in_path):
                    ch_list.append(ch_in_path)
            pathname_ch_kind = len(set(ch_list))

            pathname = re.split('[-_/&.()<>^@!#$*=+~:; ]', remains)
            pathname = [token for token in pathname if token]
            search = []
            hash = []
    else:
        pathname_depth = 0
        pathname_ch_kind = 0
        pathname_longest_token = 0
        search_and_n = 0
        pathname = []
        search = []
        hash = []

    hostname_len, hostname_std = getLength_std(hostname)
    pathname_len, pathname_std = getLength_std(pathname)
    search_len, search_std = getLength_std(search)

    return pd.Series({
        'URL_len': url_len,
        'letter_ratio': url_letter_ratio,
        'dig_ratio': url_dig_ratio,
        'special_ch_kind': url_ch_kind_n,
        'special_ch': url_ch_n,
        'URL_depth': url_depth,
        'URL_point': url_point_n,
        'at_flag': url_contain_at,
        'TLD_id': url_TLD,
        'badword_n': url_badword_n,
        'popular_n': url_popular_n,
        'exe_flag': url_exe_n,
        'http_flag': url_http_n,
        'URL_a': url.count('a'),
        'URL_b': url.count('b'),
        'URL_c': url.count('c'),
        'URL_d': url.count('d'),
        'URL_e': url.count('e'),
        'URL_f': url.count('f'),
        'URL_g': url.count('g'),
        'URL_h': url.count('h'),
        'URL_i': url.count('i'),
        'URL_j': url.count('j'),
        'URL_k': url.count('k'),
        'URL_l': url.count('l'),
        'URL_m': url.count('m'),
        'URL_n': url.count('n'),
        'URL_o': url.count('o'),
        'URL_p': url.count('p'),
        'URL_q': url.count('q'),
        'URL_r': url.count('r'),
        'URL_s': url.count('s'),
        'URL_t': url.count('t'),
        'URL_u': url.count('u'),
        'URL_v': url.count('v'),
        'URL_w': url.count('w'),
        'URL_x': url.count('x'),
        'URL_y': url.count('y'),
        'URL_z': url.count('z'),
        'hostname_a': url_part.count('a'),
        'hostname_b': url_part.count('b'),
        'hostname_c': url_part.count('c'),
        'hostname_d': url_part.count('d'),
        'hostname_e': url_part.count('e'),
        'hostname_f': url_part.count('f'),
        'hostname_g': url_part.count('g'),
        'hostname_h': url_part.count('h'),
        'hostname_i': url_part.count('i'),
        'hostname_j': url_part.count('j'),
        'hostname_k': url_part.count('k'),
        'hostname_l': url_part.count('l'),
        'hostname_m': url_part.count('m'),
        'hostname_n': url_part.count('n'),
        'hostname_o': url_part.count('o'),
        'hostname_p': url_part.count('p'),
        'hostname_q': url_part.count('q'),
        'hostname_r': url_part.count('r'),
        'hostname_s': url_part.count('s'),
        'hostname_t': url_part.count('t'),
        'hostname_u': url_part.count('u'),
        'hostname_v': url_part.count('v'),
        'hostname_w': url_part.count('w'),
        'hostname_x': url_part.count('x'),
        'hostname_y': url_part.count('y'),
        'hostname_z': url_part.count('z'),
        'hostname_token_n': len(hostname),
        'hostname_len': hostname_len,
        'hostname_ch_n': hostname_ch_n,
        'hostname_letter_ratio': hostname_letter_ratio,
        'hostname_dig_ratio': hostname_dig_ratio,
        'hostname_entropy': hostname_entropy,
        'hostname_point_n': hostname_point_n,
        'hostname_is_ip': hostname_is_ip,
        'hostname_std': hostname_std,
        'pathname_token_n': len(pathname),
        'pathname_len': pathname_len,
        'pathname_depth': pathname_depth,
        'pathname_longest_token': pathname_longest_token,
        'pathname_ch_kind': pathname_ch_kind,
        'pathname_std': pathname_std,
        'search_token_n': len(search),
        'search_len': search_len,
        'search_std': search_std,
        'search_and_n': search_and_n,
        'hash_token_n': len(hash)
    })

