import re
import math
import pandas as pd

# ====================== 保留初始代码的 URLCleaner 类（处理HTTP）======================
class URLCleaner:
    """Cleans HTTP URLs: drops IP-literal hosts entirely and masks hash-like
    path/query tokens so that structurally identical URLs collapse to one
    canonical form."""

    def __init__(self, entropy_threshold: float = 3.4):
        """
        Args:
            entropy_threshold: Shannon entropy (bits/char) above which a
                token of length >= 8 that matches no explicit pattern is
                still treated as hash-like.
        """
        self.entropy_threshold = entropy_threshold
        # Loose URL splitter: scheme://netloc[/path][?query][#fragment]
        self.url_regex = re.compile(
            r'^(?P<scheme>[a-zA-Z]+)://'
            r'(?P<netloc>[^/?#]+)'
            r'(?P<path>/[^?#]*)?'
            r'(?P<query>\?[^#]*)?'
            r'(?P<fragment>#.*)?$'
        )
        # Signatures of machine-generated identifiers: hex digests,
        # base64-ish blobs, UUIDs, timestamp+suffix tokens.
        self.hash_patterns = [
            re.compile(r'^[a-f0-9]{8,}$'),
            re.compile(r'^[A-Za-z0-9+/]{20,}={0,2}$'),
            re.compile(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', re.I),
            re.compile(r'^1[0-9]{9,12}[a-z0-9]{8,}$')
        ]
        # Characters that separate sub-tokens inside a single path segment.
        self.subsegment_splitter = re.compile(r'[_.-]')

    def calculate_entropy(self, s: str) -> float:
        """Return the Shannon entropy (bits per character) of *s*,
        computed case-insensitively; 0.0 for the empty string."""
        if not s:
            return 0.0
        freq = {}
        for c in s.lower():
            freq[c] = freq.get(c, 0) + 1
        total = len(s)
        return -sum((count / total) * math.log2(count / total) for count in freq.values())

    def is_hash_like(self, s: str) -> bool:
        """True when *s* looks machine-generated: it matches one of the
        explicit hash/UUID/base64 patterns, or exceeds the entropy
        threshold. Tokens shorter than 8 chars are never hash-like."""
        if len(s) < 8:
            return False
        if any(pattern.fullmatch(s) for pattern in self.hash_patterns):
            return True
        return self.calculate_entropy(s) > self.entropy_threshold

    def is_ipv6_like(self, host: str) -> bool:
        """
        Heuristic IPv6 detector. Accepts bracketed hosts ([addr]), a single
        '::' zero-compression, and an IPv4-mapped tail (::ffff:192.168.0.1).
        """
        if not host:
            return False
        host = host.strip()
        # Unwrap URL-style brackets; tolerate a missing closing bracket.
        if host.startswith('['):
            end_idx = host.find(']')
            host = host[1:end_idx] if end_idx != -1 else host[1:]
        if ':' not in host:
            return False
        if host.count('::') > 1:  # at most one zero-compression allowed
            return False
        if '::' in host:
            left_part, right_part = host.split('::', 1)
            left_segs = left_part.split(':') if left_part else []
            right_segs = right_part.split(':') if right_part else []
            if len(left_segs) + len(right_segs) > 7:
                return False
            segments = [s for s in left_segs + right_segs if s]
        else:
            segments = host.split(':')
            # An IPv4 tail stands in for the last two hextets, so a full
            # (uncompressed) address then has 7 colon-groups, not 8.
            has_v4_tail = bool(re.fullmatch(r'(\d{1,3}\.){3}\d{1,3}', segments[-1]))
            if len(segments) != (7 if has_v4_tail else 8):
                return False
        # BUG FIX: validate an IPv4-mapped tail BEFORE the hextet check.
        # Previously the dotted segment failed the hex pattern first, making
        # this branch unreachable and rejecting e.g. ::ffff:192.168.0.1.
        if segments and re.fullmatch(r'(\d{1,3}\.){3}\d{1,3}', segments[-1]):
            try:
                if not all(0 <= int(part) <= 255 for part in segments[-1].split('.')):
                    return False
            except ValueError:  # narrowed from a bare except
                return False
            segments = segments[:-1]
        hex_pattern = re.compile(r'^[0-9a-fA-F]{1,4}$')
        return all(hex_pattern.fullmatch(seg) for seg in segments)

    def clean_segment(self, segment: str) -> str:
        """
        Clean one path segment: a filename with a short, non-hash extension
        becomes 'xx.<ext>'; otherwise hash-like sub-tokens (split on _ . -)
        are dropped and the survivors rejoined with '_'.
        """
        if '.' in segment:
            extension = segment.rsplit('.', 1)[-1]
            if not self.is_hash_like(extension) and len(extension) <= 10:
                return f"xx.{extension}"
        # Join of an empty list is '' — an all-hash segment vanishes.
        # (A post-join re-check existed here but was dead code: the kept
        # tokens contain no splitter characters, so re-splitting cannot
        # produce a hash-like piece.)
        return '_'.join(
            sub for sub in self.subsegment_splitter.split(segment)
            if not self.is_hash_like(sub)
        )

    def clean_url(self, url: str) -> str:
        """
        Clean one HTTP URL.

        Returns '' for IP-hosted URLs (IPv4, IPv6, or letter-free netlocs);
        returns the input unchanged when it has no scheme or fails to parse;
        otherwise returns the URL with hash-like path segments removed and
        hash-like query values masked as 'xx'.
        """
        if not re.match(r'^[a-zA-Z]+://', url):
            return url
        match = self.url_regex.match(url)
        if not match:
            return url
        parts = match.groupdict()
        original_netloc = parts['netloc']
        if self.is_ipv6_like(original_netloc):
            return ''
        # Strip brackets / port to obtain the bare host name.
        if '[' in original_netloc and ']' in original_netloc:
            start = original_netloc.find('[') + 1
            end = original_netloc.find(']')
            netloc = original_netloc[start:end]
        else:
            netloc = original_netloc.split(':', 1)[0]
        # Drop dotted-quad IPv4 hosts.
        if re.match(r'^(\d{1,3}\.){3}\d{1,3}$', netloc):
            try:
                if all(0 <= int(part) <= 255 for part in netloc.split('.')):
                    return ''
            except ValueError:  # narrowed from a bare except
                pass
        # A host with no letters at all is treated as an address, not a name.
        if not any(c.isalpha() for c in netloc):
            return ''
        cleaned_path = '/'
        if parts['path']:
            segments = [self.clean_segment(s) for s in parts['path'].strip('/').split('/')]
            non_empty_segments = [s for s in segments if s]
            if non_empty_segments:
                cleaned_path = '/' + '/'.join(non_empty_segments)
                # Keep a trailing slash when a query/fragment follows.
                if parts.get('query') or parts.get('fragment'):
                    cleaned_path += '/'
        cleaned_query = parts.get('query', '')
        # The regex guarantees a non-empty query starts with '?'; the bare
        # try/except that wrapped this block protected nothing that raises.
        if cleaned_query and cleaned_query.startswith('?'):
            cleaned_params = []
            for param in cleaned_query[1:].split('&'):
                if '=' in param:
                    k, v = param.split('=', 1)
                    # Mask (not drop) hash-like values so the key survives.
                    cleaned_params.append(f"{k}={v}" if not self.is_hash_like(v) else f"{k}=xx")
                else:
                    cleaned_params.append(param)
            cleaned_query = '?' + '&'.join(cleaned_params) if cleaned_params else ''
        result = f"{parts['scheme']}://{parts['netloc']}{cleaned_path}"
        if cleaned_query:
            result += cleaned_query
        if parts.get('fragment'):
            result += parts['fragment']
        return result


# ====================== 保留HTTPS的处理方法（StrictDomainCleaner 类）======================
class StrictDomainCleaner:
    """Cleans HTTPS URLs by stripping random-looking labels from the host
    and discarding bare-IP hosts altogether."""

    def __init__(self):
        # Dotted-quad IPv4 matcher: each octet constrained to 0-255.
        self.ip_pattern = re.compile(
            r'^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$'
        )
        self._init_patterns()

    def _init_patterns(self):
        """Compile the patterns that flag a host label as machine-generated."""
        self.patterns = [
            re.compile(r'^[a-f0-9]{8,}$', re.I),
            re.compile(r'^[0-9]{8,}$'),
            re.compile(r'^[a-f0-9]{4}-[a-f0-9]{4}$', re.I),
            re.compile(r'^\d+-\d+(-\d+)+$'),
            re.compile(r'^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$', re.I),
            re.compile(r'\b[a-f0-9]{12,}-[a-f0-9]{12,}\b', re.I),
            re.compile(r'^[a-f0-9]{16,}$', re.I),
            re.compile(r'\b[a-z0-9]+(?:-[a-z0-9]+){4,}\b', re.I),
            re.compile(r'\b[a-z0-9]{22,}\b', re.I),
        ]

    def is_ip_address(self, domain: str) -> bool:
        """True when *domain* is a bare dotted-quad IPv4 address."""
        return self.ip_pattern.match(domain) is not None

    def is_random_like(self, s: str) -> bool:
        """True when *s* fully matches any of the random-label patterns."""
        return any(p.fullmatch(s) for p in self.patterns)

    def clean_domain(self, domain: str) -> str:
        """Clean one host[:port]: empty string for IPs, otherwise the host
        with random-looking dot-labels removed (port preserved)."""
        # Collapse deep *.tdum.alibaba.com hosts down to the last 3 labels.
        if domain.endswith('.tdum.alibaba.com'):
            labels = domain.split('.')
            if len(labels) > 3:
                domain = '.'.join(labels[-3:])
        if ':' in domain:
            host, _, port = domain.partition(':')
        else:
            host, port = domain, None
        if self.is_ip_address(host):
            return ""
        cleaned = '.'.join(
            label for label in host.split('.') if not self.is_random_like(label)
        )
        return f"{cleaned}:{port}" if port else cleaned

    def clean_url(self, url: str) -> str:
        """Clean one URL; '' when it has no scheme or its host cleans away."""
        if '://' not in url:
            return ""
        scheme, _, remainder = url.partition('://')
        host_part, slash, tail = remainder.partition('/')
        cleaned_host = self.clean_domain(host_part)
        if not cleaned_host:
            return ""
        return f"{scheme}://{cleaned_host}{slash}{tail}"


# ====================== 核心修改：适配CSV文件+新列名+协议判断逻辑 ======================
def process_csv_with_unique_mark(
    input_file: str,
    output_file: str,
    deduplicate: bool = False,
    keep_first: bool = True
) -> None:
    """
    Clean the URLs of a '|'-separated CSV file and mark the first occurrence
    of every distinct cleaned URL, writing the result to an XLSX file.

    Protocol resolution:
      * a URL that already carries a scheme is cleaned with the matching
        cleaner (http -> URLCleaner, https -> StrictDomainCleaner);
      * a scheme-less URL gets a scheme from ``destinationport``
        (80 -> http, 443 -> https);
      * any other port (or a non-HTTP(S) scheme) yields an empty cleaned URL.

    Args:
        input_file: path to the '|'-separated CSV; must contain the columns
            ``destination_url`` and ``destinationport``.
        output_file: path of the XLSX result file.
        deduplicate: when True, keep only one row per cleaned URL.
        keep_first: on deduplication keep the first (True) or last (False)
            occurrence of each cleaned URL.

    Raises:
        ValueError: when a required column is missing from the input CSV.
    """
    http_cleaner = URLCleaner()
    https_cleaner = StrictDomainCleaner()

    # 1. Read the CSV ('|' separator). dtype=str keeps ports as text so
    #    "80" is not silently coerced to the number 80 (or 80.0).
    print("正在读取CSV文件...")
    original_df = pd.read_csv(input_file, sep='|', dtype=str, encoding_errors='ignore')
    original_df['original_id'] = original_df.index  # original row number, for traceability

    # 2. Fail fast when the columns this function actually reads are missing.
    #    (This validation used to be commented out, which produced an opaque
    #    KeyError deep inside apply() instead of a clear error message.)
    required_columns = ['destinationport', 'destination_url']
    missing_cols = [col for col in required_columns if col not in original_df.columns]
    if missing_cols:
        raise ValueError(f"CSV文件中未找到必要列：{', '.join(missing_cols)}")

    # 3. Complete the protocol from the port, then clean per protocol.
    print("正在根据端口补全URL协议并清洗...")

    def complete_protocol_and_clean(row):
        """Return (url_with_protocol, cleaned_url) for one row; empty strings
        mark URLs that are missing or cannot be resolved/cleaned."""
        original_url = str(row['destination_url']).strip()
        dest_port = str(row['destinationport']).strip()  # textual port, e.g. "80"
        processed_url = original_url

        # Empty / NaN URLs produce nothing.
        if not processed_url or processed_url.lower() in ['nan', 'none']:
            return ("", "")

        # Scheme-less URLs: infer the scheme from the destination port.
        if '://' not in processed_url:
            if dest_port == '80':
                processed_url = f"http://{processed_url}"
            elif dest_port == '443':
                processed_url = f"https://{processed_url}"
            else:
                # Unknown port: the scheme cannot be determined, clean to empty.
                return (processed_url, "")

        # Dispatch to the protocol-specific cleaner.
        if processed_url.startswith("http://"):
            cleaned_url = http_cleaner.clean_url(processed_url)
        elif processed_url.startswith("https://"):
            cleaned_url = https_cleaner.clean_url(processed_url)
        else:
            cleaned_url = ""  # neither HTTP nor HTTPS

        return (processed_url, cleaned_url)

    result = original_df.apply(complete_protocol_and_clean, axis=1, result_type="expand")
    original_df['processed_url'] = result[0]  # URL after protocol completion (debug aid)
    original_df['cleaned_url'] = result[1]    # final cleaned URL

    # 4. Mark the first occurrence of every non-empty cleaned URL.
    print("正在标记唯一基准URL...")
    seen_urls = set()
    is_unique_base = []
    for url in original_df['cleaned_url']:
        if url and url not in seen_urls:
            is_unique_base.append(True)
            seen_urls.add(url)
        else:
            is_unique_base.append(False)
    original_df['is_unique_base'] = is_unique_base

    # 5. Optional deduplication on the cleaned URL.
    if deduplicate:
        final_df = original_df.drop_duplicates(subset=['cleaned_url'], keep='first' if keep_first else 'last')
        print(f"已去重：保留{('首次' if keep_first else '最后')}出现的记录，共{len(final_df)}行")
    else:
        final_df = original_df.copy()
        print(f"未去重：保留所有{len(final_df)}行记录")

    # 6. Drop the intermediate column and write the XLSX result.
    keep_columns = [col for col in original_df.columns if col not in ['processed_url']]
    final_df = final_df[keep_columns]
    final_df.to_excel(output_file, index=False)
    print(f"\n处理完成！结果已保存至：{output_file}")

    # 7. Re-open the output and report basic statistics as a sanity check
    #    (best-effort: any failure here is reported, not raised).
    try:
        verify_df = pd.read_excel(output_file)
        if 'is_unique_base' in verify_df.columns:
            total = len(verify_df)
            unique_count = verify_df['is_unique_base'].sum()
            print(f"验证成功：标记列'is_unique_base'存在")
            print(f"统计：总记录数={total} | 唯一基准URL数={unique_count} | 重复/无效URL数={total - unique_count}")
        else:
            print("警告：输出文件中未找到标记列'is_unique_base'")
    except Exception as e:
        print(f"验证失败：{str(e)}")


# ====================== 调用逻辑修改：适配CSV输入文件 ======================
if __name__ == "__main__":
    # Adjust these paths before running: the input is a '|'-delimited CSV,
    # the output is an XLSX file carrying the uniqueness-marker column.
    input_path = "./input.csv"
    output_path = "cleaned_url_result.xlsx"

    # deduplicate=True drops repeated cleaned URLs; keep_first=True keeps
    # the earliest occurrence of each one.
    process_csv_with_unique_mark(
        input_file=input_path,
        output_file=output_path,
        deduplicate=True,
        keep_first=True,
    )