import csv
import os
import xml.sax
import time
from collections import defaultdict
from itertools import combinations
from datetime import datetime
from models import Pattern

# Mining configuration constants for the DBLP dataset.
DBLP_MAX_RECORDS = 2000  # maximum number of records (papers) to process
DBLP_MAX_AUTHORS = 5      # maximum number of authors considered per record


def parse_file(csv_file):
    """Parse a GBK-encoded survey CSV into (patterns, items).

    The item column is '常用app' when the path contains 'app', otherwise
    '大学生活'. Rows missing an ID or the item column are skipped. Items in
    a row are separated by '、'.

    Args:
        csv_file: path to the survey CSV file.

    Returns:
        (patterns, items): list of Pattern records and the list of distinct
        item names in first-seen order. Both are empty on read/parse failure
        (best-effort: errors are reported, not raised).
    """
    patterns = []
    items = []
    seen = set()  # O(1) dedup instead of O(n) list membership per token

    try:
        with open(csv_file, mode='r', encoding='gbk') as file:
            reader = csv.DictReader(file)
            item_column = '常用app' if 'app' in csv_file else '大学生活'

            for row in reader:
                if not row.get('ID') or not row.get(item_column):
                    continue

                tokens = [t.strip() for t in row[item_column].split('、') if t.strip()]
                for token in tokens:
                    if token not in seen:
                        seen.add(token)
                        items.append(token)
                patterns.append(Pattern(int(row['ID']), row['性别'], tokens))

    except FileNotFoundError:
        # Best-effort: report instead of silently swallowing the error.
        print(f"警告: 文件不存在: {csv_file}")
    except Exception as e:
        print(f"警告: 解析 {csv_file} 出错: {e}")
    return patterns, items


def filter_by_gender(patterns, gender_filter):
    """Filter patterns by gender; the sentinel '所有' (all) disables filtering.

    Returns the original list object unchanged for '所有', otherwise a new
    list containing only the records whose `name` matches `gender_filter`.
    """
    keep_all = gender_filter == '所有'
    return patterns if keep_all else [rec for rec in patterns if rec.name == gender_filter]


def get_transactions(patterns):
    """Turn each record's token list into a set — one transaction per record."""
    transactions = []
    for record in patterns:
        transactions.append(set(record.tokens))
    return transactions


def generate_candidates(itemset_list, k):
    """Generate candidate k-itemsets from frequent (k-1)-itemsets (Apriori).

    Performs the classic join step (merge pairs sharing their first k-2
    sorted items) followed by the prune step (drop any candidate that has a
    non-frequent (k-1)-subset).

    Args:
        itemset_list: frequent (k-1)-itemsets as frozensets.
        k: size of the candidates to generate (k >= 2).

    Returns:
        list of candidate frozensets of size k (order unspecified).
    """
    n = len(itemset_list)
    # Hoist the per-itemset sort out of the O(n^2) join loop: the original
    # re-sorted both itemsets on every pair comparison.
    sorted_itemsets = [sorted(s) for s in itemset_list]

    # 1. Join: a set dedups automatically (union of different pairs can
    # produce the same candidate).
    candidates = set()
    for i in range(n):
        for j in range(i + 1, n):
            if k == 2:
                # 1-itemsets: any pair of distinct items joins directly.
                union = frozenset(itemset_list[i] | itemset_list[j])
                if len(union) == 2:
                    candidates.add(union)
            else:
                # k >= 3: join only when the first k-2 sorted items match.
                a, b = sorted_itemsets[i], sorted_itemsets[j]
                if a[:k-2] == b[:k-2] and a[-1] < b[-1]:
                    union = frozenset(itemset_list[i] | itemset_list[j])
                    if len(union) == k:
                        candidates.add(union)

    # 2. Prune: every (k-1)-subset of a surviving candidate must itself be
    # frequent (Apriori anti-monotonicity).
    frequent_itemsets_set = set(itemset_list)
    pruned_candidates = []
    for candidate in candidates:
        if all(frozenset(sub) in frequent_itemsets_set
               for sub in combinations(candidate, k - 1)):
            pruned_candidates.append(candidate)

    return pruned_candidates


def count_support(transactions, itemset):
    """Return the number of transactions that contain every item of `itemset`."""
    return sum(1 for transaction in transactions if itemset.issubset(transaction))


def generation_frequent_itemset(min_support, patterns, progress_callback=None):
    """Mine all frequent itemsets from the patterns with Apriori.

    Args:
        min_support: minimum support COUNT (absolute number of transactions,
            not a ratio).
        patterns: records whose `tokens` form one transaction each.
        progress_callback: optional callable(k, message) invoked as each
            k-itemset level is mined.

    Returns:
        list of dicts, one per frequent itemset:
        {'itemset': frozenset, 'support_count': int, 'support': float}.
        Empty list when there are no transactions or no frequent 1-itemsets.
    """
    transactions = get_transactions(patterns)
    num_transactions = len(transactions)

    if num_transactions == 0:
        return []

    # --- 1-itemsets ---
    if progress_callback:
        progress_callback(1, "正在挖掘1-项频繁项集...")

    # Count every item's support in one pass over the data. Transactions are
    # sets, so each item contributes at most once per transaction. (The
    # original scanned all transactions once PER item via count_support, and
    # computed an unused `support` ratio.)
    item_counts = defaultdict(int)
    for transaction in transactions:
        for item in transaction:
            item_counts[item] += 1

    item_support = {}  # frozenset -> support count, for all frequent itemsets
    frequent_1_itemsets = []
    for item, support_count in item_counts.items():
        if support_count >= min_support:
            itemset = frozenset([item])
            frequent_1_itemsets.append(itemset)
            item_support[itemset] = support_count

    if not frequent_1_itemsets:
        return []

    if progress_callback:
        progress_callback(1, f"1-项频繁项集完成，找到 {len(frequent_1_itemsets)} 个")

    # --- k-itemsets (k >= 2): iterate until no new frequent level appears ---
    frequent_itemsets = [frequent_1_itemsets]
    current_frequent = frequent_1_itemsets
    k = 1

    while current_frequent:
        k += 1
        if progress_callback:
            progress_callback(k, f"正在挖掘{k}-项频繁项集...")

        candidates = generate_candidates(current_frequent, k)
        if not candidates:
            break

        next_frequent = []
        for candidate in candidates:
            support_count = count_support(transactions, candidate)
            if support_count >= min_support:
                next_frequent.append(candidate)
                item_support[candidate] = support_count

        if not next_frequent:
            break

        if progress_callback:
            progress_callback(k, f"{k}-项频繁项集完成，找到 {len(next_frequent)} 个")

        frequent_itemsets.append(next_frequent)
        current_frequent = next_frequent

    # Flatten all levels into result dicts with both count and ratio.
    return [
        {
            'itemset': itemset,
            'support_count': item_support[itemset],
            'support': item_support[itemset] / num_transactions,
        }
        for level in frequent_itemsets
        for itemset in level
    ]


def generate_association_rules(frequent_itemsets, min_confidence=0.5):
    """Derive association rules from mined frequent itemsets.

    For every frequent itemset of size >= 2, every non-empty proper subset is
    tried as an antecedent; the rule is kept when
    confidence = support(itemset) / support(antecedent) >= min_confidence.

    Args:
        frequent_itemsets: dicts with 'itemset' (frozenset) and
            'support_count' (int), as produced by generation_frequent_itemset.
        min_confidence: minimum confidence threshold in [0, 1].

    Returns:
        list of rule dicts: {'antecedent', 'consequent', 'support_count',
        'confidence'}.
    """
    # Build an O(1) support lookup once, instead of the original linear scan
    # over frequent_itemsets for every candidate antecedent.
    support_by_itemset = {
        info['itemset']: info['support_count'] for info in frequent_itemsets
    }

    rules = []
    for itemset_info in frequent_itemsets:
        itemset = itemset_info['itemset']
        if len(itemset) < 2:
            continue  # a rule needs both an antecedent and a consequent

        itemset_support_count = itemset_info['support_count']
        itemset_list = list(itemset)

        for size in range(1, len(itemset)):
            for antecedent in combinations(itemset_list, size):
                antecedent = frozenset(antecedent)
                antecedent_support_count = support_by_itemset.get(antecedent)

                # Skip antecedents that are not (known) frequent itemsets.
                if not antecedent_support_count:
                    continue

                confidence = itemset_support_count / antecedent_support_count
                if confidence >= min_confidence:
                    rules.append({
                        'antecedent': antecedent,
                        'consequent': itemset - antecedent,
                        'support_count': itemset_support_count,
                        'confidence': confidence,
                    })

    return rules


def format_results(filename, min_support, gender_filter, frequent_itemsets, rules):
    """Build the human-readable text report for one mining run.

    Args:
        filename: display name of the data source. Bug fix: the original
            printed the literal "(unknown)" and never used this parameter.
        min_support: minimum support count used.
        gender_filter: gender filter applied ('男'/'女'/'所有').
        frequent_itemsets: dicts with 'itemset', 'support_count', 'support'.
        rules: dicts with 'antecedent', 'consequent', 'support_count',
            'confidence'.

    Returns:
        The formatted report as a single string.
    """
    lines = []
    lines.append(f"\n{'='*80}")
    lines.append(f"文件: {filename}")
    lines.append(f"最小支持度: {min_support}")
    lines.append(f"性别筛选: {gender_filter}")
    lines.append(f"{'='*80}\n")

    lines.append(f"频繁模式结果 (共 {len(frequent_itemsets)} 个):")
    lines.append("-" * 80)

    # Group itemsets by size so the report lists 1-itemsets, 2-itemsets, ...
    itemsets_by_size = defaultdict(list)
    for itemset_info in frequent_itemsets:
        itemsets_by_size[len(itemset_info['itemset'])].append(itemset_info)

    for size in sorted(itemsets_by_size):
        lines.append(f"\n{size}-项频繁项集:")
        for itemset_info in sorted(itemsets_by_size[size],
                                   key=lambda x: x['support_count'], reverse=True):
            itemset_str = "、".join(sorted(itemset_info['itemset']))
            lines.append(f"  [{itemset_str}] - 支持度计数: {itemset_info['support_count']}, "
                  f"支持度: {itemset_info['support']:.2%}")

    # NOTE(review): the "50%" shown below mirrors the min_confidence=0.5
    # default of generate_association_rules; keep the two in sync.
    lines.append(f"\n\n关联规则结果 (共 {len(rules)} 个, 最小置信度: 50%):")
    lines.append("-" * 80)

    for rule in sorted(rules, key=lambda x: x['confidence'], reverse=True):
        antecedent_str = "、".join(sorted(rule['antecedent']))
        consequent_str = "、".join(sorted(rule['consequent']))
        lines.append(f"  [{antecedent_str}] => [{consequent_str}]")
        lines.append(f"    支持度计数: {rule['support_count']}, 置信度: {rule['confidence']:.2%}")

    lines.append(f"\n{'='*80}\n")

    return "\n".join(lines)


def print_results(filename, min_support, gender_filter, frequent_itemsets, rules):
    """Deprecated alias kept for backward compatibility; delegates to format_results()."""
    return format_results(
        filename, min_support, gender_filter, frequent_itemsets, rules
    )


def write_results_to_csv(filename, min_support, gender_filter, frequent_itemsets, rules):
    """Write one mining run's results to a UTF-8-BOM CSV under results/.

    Args:
        filename: source data file path (used to derive the CSV name).
        min_support: minimum support count used for the run.
        gender_filter: gender filter applied ('男'/'女'/'所有').
        frequent_itemsets: dicts with 'itemset', 'support_count', 'support'.
        rules: dicts with 'antecedent', 'consequent', 'support_count',
            'confidence'.

    Returns:
        The path of the CSV file written.
    """
    base_name = os.path.splitext(os.path.basename(filename))[0]
    gender_str = gender_filter if gender_filter != '所有' else '全部'
    csv_filename = os.path.join('results', f"{base_name}_支持度{min_support}_{gender_str}.csv")

    # Robustness fix: ensure the output directory exists so this function
    # also works when called outside apriori_mining().
    os.makedirs('results', exist_ok=True)

    # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
    with open(csv_filename, 'w', encoding='utf-8-sig', newline='') as csvfile:
        writer = csv.writer(csvfile)

        # Header metadata.
        writer.writerow(['数据源', os.path.basename(filename)])
        writer.writerow(['最小支持度', min_support])
        writer.writerow(['性别筛选', gender_filter])
        writer.writerow(['生成时间', datetime.now().strftime('%Y-%m-%d %H:%M:%S')])
        writer.writerow([])  # blank separator row

        # Frequent-itemset section.
        writer.writerow(['频繁项集结果'])
        writer.writerow(['项集大小', '项集内容', '支持度计数', '支持度(%)'])

        # Group by itemset size, then sort by support within each size.
        itemsets_by_size = defaultdict(list)
        for itemset_info in frequent_itemsets:
            itemsets_by_size[len(itemset_info['itemset'])].append(itemset_info)

        for size in sorted(itemsets_by_size):
            for itemset_info in sorted(itemsets_by_size[size],
                                       key=lambda x: x['support_count'], reverse=True):
                writer.writerow([
                    size,
                    "、".join(sorted(itemset_info['itemset'])),
                    itemset_info['support_count'],
                    f"{itemset_info['support'] * 100:.2f}%"
                ])

        # Association-rule section.
        writer.writerow([])  # blank separator row
        writer.writerow(['关联规则结果'])
        writer.writerow(['前件', '后件', '支持度计数', '置信度(%)'])

        # Highest-confidence rules first.
        for rule in sorted(rules, key=lambda x: x['confidence'], reverse=True):
            writer.writerow([
                "、".join(sorted(rule['antecedent'])),
                "、".join(sorted(rule['consequent'])),
                rule['support_count'],
                f"{rule['confidence'] * 100:.2f}%"
            ])

    return csv_filename


def apriori_mining():
    """Run Apriori mining over the survey CSV files.

    For each data file, mines frequent itemsets and association rules per
    (min_support, gender) combination, writing a text report and per-run CSV
    files into results/.
    """
    files = [os.path.join('data', 'app.csv'), os.path.join('data', 'life.csv')]
    min_supports = [3]  # minimum support counts to try
    gender_filters = ['男', '女', '所有']
    os.makedirs('results', exist_ok=True)

    for filename in files:
        base_name = os.path.splitext(os.path.basename(filename))[0]
        result_filename = os.path.join('results', f"{base_name}_results.txt")

        # `with` guarantees the handle is closed on every exit path; the
        # original mixed try/finally with an extra explicit close() that
        # could run twice on the no-data path.
        with open(result_filename, 'w', encoding='utf-8') as result_file:
            result_file.write(f"Apriori频繁模式挖掘结果 - {os.path.basename(filename)}\n")
            result_file.write(f"{'='*80}\n")
            result_file.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            result_file.write(f"{'='*80}\n\n")

            patterns, items = parse_file(filename)

            if not patterns:
                # Bug fix: the message used to print the literal "(unknown)"
                # instead of the data file's name.
                result_file.write(f"警告: {os.path.basename(filename)} 没有数据\n")
                print(f"已生成结果文件: {result_filename}")
                continue

            # Mine every support/gender combination.
            for min_support in min_supports:
                for gender_filter in gender_filters:
                    filtered_patterns = filter_by_gender(patterns, gender_filter)

                    if not filtered_patterns:
                        result_file.write(f"\n{os.path.basename(filename)} - 支持度{min_support} - {gender_filter}: 无数据\n")
                        continue

                    frequent_itemsets = generation_frequent_itemset(min_support, filtered_patterns)

                    if not frequent_itemsets:
                        result_file.write(f"\n{os.path.basename(filename)} - 支持度{min_support} - {gender_filter}: 无频繁项集\n")
                        continue

                    rules = generate_association_rules(frequent_itemsets, min_confidence=0.5)

                    # Text report plus a per-combination CSV file.
                    result_file.write(format_results(os.path.basename(filename), min_support, gender_filter, frequent_itemsets, rules))
                    write_results_to_csv(filename, min_support, gender_filter, frequent_itemsets, rules)

        print(f"已生成结果文件: {result_filename}")


class StopParsing(Exception):
    """Raised to abort SAX parsing early once enough records have been collected."""
    pass


class DBLPHandler(xml.sax.ContentHandler):
    """SAX handler that extracts per-paper author lists from a DBLP XML dump.

    Only publications with at least two authors are recorded (each truncated
    to at most `max_authors_per_paper` authors). Parsing is aborted by
    raising StopParsing as soon as `max_records` records have been collected.
    """
    def __init__(self, max_records=20000, max_authors_per_paper=5, progress_callback=None):
        self.max_records = max_records
        self.max_authors_per_paper = max_authors_per_paper
        self.records = []  # one author list per accepted publication
        self.current_data = ""
        self.current_authors = []
        self.in_target_tag = False
        self.record_count = 0
        self.current_author = ""  # author name currently being accumulated
        self.target_tags = ['article', 'incollection', 'mastersthesis', 'phdthesis', 'inproceedings', 'book']
        self.progress_callback = progress_callback  # progress-reporting callback
    
    def startElement(self, tag, attributes):
        """Open a new publication record when a target tag starts."""
        # Once the record quota is reached, stop handling any new tags.
        if self.record_count >= self.max_records:
            raise StopParsing("已达到目标记录数")
        
        if tag in self.target_tags:
            if self.record_count < self.max_records:
                # Flush any unfinished record first (kept only with >= 2 authors).
                if len(self.current_authors) >= 2:
                    if len(self.current_authors) > self.max_authors_per_paper:
                        self.current_authors = self.current_authors[:self.max_authors_per_paper]
                    self.records.append(self.current_authors.copy())
                    self.record_count += 1
                    # Abort immediately if that flush reached the quota.
                    if self.record_count >= self.max_records:
                        raise StopParsing("已达到目标记录数")
                self.current_authors = []
                self.in_target_tag = True
        self.current_data = tag
        self.current_author = ""  # reset the author-name accumulator
    
    def endElement(self, tag):
        """Close an <author> or publication element, saving collected data."""
        if tag == "author":
            # Author tag closed: store the accumulated name.
            author = self.current_author.strip()
            if author and self.in_target_tag and len(self.current_authors) < self.max_authors_per_paper:
                self.current_authors.append(author)
            self.current_author = ""
        elif tag in self.target_tags:
            # Publication closed: keep it when it has at least 2 authors and
            # the record quota has not been reached yet.
            if self.record_count < self.max_records and len(self.current_authors) >= 2:
                if len(self.current_authors) > self.max_authors_per_paper:
                    self.current_authors = self.current_authors[:self.max_authors_per_paper]
                self.records.append(self.current_authors.copy())
                self.record_count += 1
                
                # Report progress every 100 records or when hitting the quota.
                if self.progress_callback and (self.record_count % 100 == 0 or self.record_count >= self.max_records):
                    self.progress_callback(self.record_count, self.max_records)
                
                # Abort parsing as soon as the quota is reached.
                if self.record_count >= self.max_records:
                    raise StopParsing("已达到目标记录数")
            
            self.current_authors = []
            self.in_target_tag = False
        
        self.current_data = ""
    
    def characters(self, content):
        """Accumulate character data while inside an <author> element."""
        # Ignore content once the record quota has been reached.
        if self.record_count >= self.max_records:
            return
        
        if self.current_data == "author" and self.in_target_tag:
            # SAX may deliver a name in multiple chunks; concatenate them.
            self.current_author += content


class ProgressBar:
    """Console progress bar that redraws itself in place on a single line."""

    def __init__(self, total_steps, prefix="总体进度"):
        self.total_steps = total_steps
        self.current_step = 0
        self.prefix = prefix
        self.bar_length = 40

    def update(self, step=None, suffix=""):
        """Move to `step` (or advance by one when omitted) and redraw the bar."""
        self.current_step = self.current_step + 1 if step is None else step

        if self.total_steps > 0:
            percent = (self.current_step / self.total_steps) * 100
            filled_length = int(self.bar_length * self.current_step // self.total_steps)
        else:
            percent = 0
            filled_length = 0

        bar = '█' * filled_length + '░' * (self.bar_length - filled_length)
        print(f'\r{self.prefix}: [{bar}] {self.current_step}/{self.total_steps} ({percent:.1f}%) {suffix}', end='', flush=True)
        # Terminate the line once the bar is full.
        if self.current_step >= self.total_steps:
            print()

    def finish(self):
        """Jump straight to 100% and terminate the line."""
        self.update(self.total_steps)


def parse_dblp_xml(xml_file, max_records=20000, max_authors=5):
    """Parse a DBLP XML dump into co-authorship Patterns.

    Collects at most `max_records` publications with >= 2 authors (each
    truncated to `max_authors` authors) and stops parsing early once the
    quota is reached.

    Returns:
        (patterns, items): Pattern records (ID = 1-based record index,
        empty name, tokens = author list) and the list of distinct authors.
    """
    parse_progress = ProgressBar(max_records, prefix="XML解析进度")
    parse_progress.update(0, suffix="开始解析XML文件...")

    def on_progress(current, total):
        # Mirror the handler's progress onto the console bar.
        parse_progress.update(current, suffix=f"已解析 {current} 条有效记录")

    handler = DBLPHandler(
        max_records=max_records,
        max_authors_per_paper=max_authors,
        progress_callback=on_progress,
    )

    try:
        sax_parser = xml.sax.make_parser()
        sax_parser.setFeature(xml.sax.handler.feature_namespaces, 0)
        sax_parser.setContentHandler(handler)
        sax_parser.parse(xml_file)
        parse_progress.finish()
        print(f"解析完成，提取 {len(handler.records)} 条有效记录")
    except StopParsing:
        # Early stop once the record quota is reached — this is the normal path.
        parse_progress.finish()
        print(f"解析完成，提取 {len(handler.records)} 条有效记录（已提前停止）")
    except Exception as e:
        parse_progress.finish()
        print(f"解析出错: {e}")

    patterns = []
    items = set()

    for idx, authors in enumerate(handler.records):
        if len(authors) < 2:  # only co-authored papers form a transaction
            continue
        items.update(authors)
        # The 1-based record index serves as the ID; the name field is unused.
        patterns.append(Pattern(idx + 1, "", authors))

    return patterns, list(items)


def write_authors_results_to_csv(filename, min_support, frequent_itemsets, rules, max_records=None, max_authors=None):
    """Write DBLP co-author mining results to a UTF-8-BOM CSV under results/.

    Args:
        filename: source data file path (used to derive the CSV name).
        min_support: minimum support count used for the run.
        frequent_itemsets: dicts with 'itemset', 'support_count', 'support'.
        rules: dicts with 'antecedent', 'consequent', 'support_count',
            'confidence'.
        max_records/max_authors: dataset-size metadata for the header; fall
            back to the module-level DBLP_* constants when omitted.

    Returns:
        The path of the CSV file written.
    """
    base_name = os.path.splitext(os.path.basename(filename))[0]
    csv_filename = os.path.join('results', f"{base_name}_合著者_支持度{min_support}.csv")

    # Fall back to the module-wide configuration constants.
    if max_records is None:
        max_records = DBLP_MAX_RECORDS
    if max_authors is None:
        max_authors = DBLP_MAX_AUTHORS

    # Robustness fix: ensure the output directory exists so this function
    # also works when called outside apriori_mining_authors().
    os.makedirs('results', exist_ok=True)

    with open(csv_filename, 'w', encoding='utf-8-sig', newline='') as csvfile:
        writer = csv.writer(csvfile)

        # Header metadata.
        writer.writerow(['数据源', 'DBLP数据集'])
        writer.writerow(['处理记录数', f'{max_records}条'])
        writer.writerow(['每条记录作者数', f'前{max_authors}个作者'])
        writer.writerow(['最小支持度', min_support])
        writer.writerow(['生成时间', datetime.now().strftime('%Y-%m-%d %H:%M:%S')])
        writer.writerow([])  # blank separator row

        # Frequent co-author groups.
        writer.writerow(['频繁合著者组结果'])
        writer.writerow(['组大小', '合著者', '支持度计数', '支持度(%)'])

        # Group by itemset size, then sort by support within each size.
        itemsets_by_size = defaultdict(list)
        for itemset_info in frequent_itemsets:
            itemsets_by_size[len(itemset_info['itemset'])].append(itemset_info)

        for size in sorted(itemsets_by_size):
            for itemset_info in sorted(itemsets_by_size[size],
                                       key=lambda x: x['support_count'], reverse=True):
                writer.writerow([
                    size,
                    "、".join(sorted(itemset_info['itemset'])),
                    itemset_info['support_count'],
                    f"{itemset_info['support'] * 100:.2f}%"
                ])

        # Association rules between co-author groups.
        writer.writerow([])  # blank separator row
        writer.writerow(['合著者关联规则结果'])
        writer.writerow(['前件（作者组）', '后件（作者组）', '支持度计数', '置信度(%)'])

        # Highest-confidence rules first.
        for rule in sorted(rules, key=lambda x: x['confidence'], reverse=True):
            writer.writerow([
                "、".join(sorted(rule['antecedent'])),
                "、".join(sorted(rule['consequent'])),
                rule['support_count'],
                f"{rule['confidence'] * 100:.2f}%"
            ])

    return csv_filename


def apriori_mining_authors():
    """挖掘DBLP数据集中的合著者关系"""
    # 记录开始时间
    start_time = time.time()
    
    xml_file = os.path.join('data', 'dblp.xml')
    min_support = 3

    max_records = DBLP_MAX_RECORDS
    max_authors = DBLP_MAX_AUTHORS

    os.makedirs('results', exist_ok=True)
    
    print("\n" + "="*80)
    print("开始DBLP合著者关系挖掘")
    print(f"最小支持度: {min_support}")
    print(f"数据规模: 处理{max_records}条记录，每条记录最多{max_authors}个作者")
    print("="*80 + "\n")
    
    # 1.解析XML文件
    print("步骤1: 解析XML文件")
    patterns, items = parse_dblp_xml(xml_file, max_records=max_records, max_authors=max_authors)
    
    if not patterns:
        print("错误: 未能解析到有效数据")
        return

    total_papers = len(patterns)
    total_authors = len(items)
    
    print(f"\n数据统计: 有效论文数={total_papers}, 涉及作者数={total_authors}\n")
    
    # 2. 挖掘过程
    print("步骤2: 挖掘合著者关系")
    
    # 生成频繁项集（按k-项集大小显示进度）
    print("  生成频繁项集...")
    # 使用一个合理的最大k值（比如20）作为进度条上限
    max_k = 20
    itemset_progress = ProgressBar(max_k, prefix="  频繁项集挖掘进度")
    max_actual_k = 1  # 记录实际的最大k值
    
    def itemset_progress_callback(k, message):
        """频繁项集挖掘进度回调"""
        nonlocal max_actual_k
        max_actual_k = max(max_actual_k, k)
        itemset_progress.update(k, suffix=message)
    
    frequent_itemsets = generation_frequent_itemset(min_support, patterns, 
                                                     progress_callback=itemset_progress_callback)
    
    # 完成频繁项集进度条，显示实际的最大k值
    itemset_progress.update(max_actual_k, suffix=f"挖掘完成（最大{max_actual_k}-项频繁项集）")
    itemset_progress.finish()
    
    if not frequent_itemsets:
        print(f"错误: 支持度 {min_support} 下无频繁项集")
        return
    
    print(f"  频繁项集挖掘完成，共找到 {len(frequent_itemsets)} 个频繁项集\n")
    
    # 生成关联规则
    print("  生成关联规则...", end='', flush=True)
    rules = generate_association_rules(frequent_itemsets, min_confidence=0.5)
    print(f" 完成，找到 {len(rules)} 条关联规则\n")
    
    # 写入CSV文件
    print("  写入CSV文件...", end='', flush=True)
    csv_filename = write_authors_results_to_csv(xml_file, min_support, frequent_itemsets, rules, 
                                                  max_records=max_records, max_authors=max_authors)
    print(f" 完成: {csv_filename}\n")
    
    # 同时生成文本摘要文件
    print("  生成摘要文件...", end='', flush=True)
    summary_filename = os.path.join('results', f"dblp_合著者_支持度{min_support}_摘要.txt")
    with open(summary_filename, 'w', encoding='utf-8') as f:
        f.write(f"DBLP合著者关系挖掘结果 - 支持度 {min_support}\n")
        f.write("=" * 80 + "\n")
        f.write(f"处理论文数: {total_papers}\n")
        f.write(f"涉及作者数: {total_authors}\n")
        f.write(f"最小支持度: {min_support}\n")
        f.write(f"频繁合著者组数: {len(frequent_itemsets)}\n")
        f.write(f"关联规则数: {len(rules)}\n")
        f.write("=" * 80 + "\n\n")
        
        # 写入前10个最频繁的合著者组
        f.write("前10个最频繁的合著者组:\n")
        f.write("-" * 80 + "\n")
        sorted_itemsets = sorted(frequent_itemsets, 
                                key=lambda x: x['support_count'], reverse=True)
        for idx, itemset_info in enumerate(sorted_itemsets[:10], 1):
            authors_str = "、".join(sorted(itemset_info['itemset']))
            f.write(f"{idx}. [{authors_str}] - 支持度计数: {itemset_info['support_count']}, "
                   f"支持度: {itemset_info['support']:.2%}\n")
        
        # 写入前10个最高置信度的关联规则
        if rules:
            f.write("\n前10个最高置信度的合著者关联规则:\n")
            f.write("-" * 80 + "\n")
            sorted_rules = sorted(rules, key=lambda x: x['confidence'], reverse=True)
            for idx, rule in enumerate(sorted_rules[:10], 1):
                antecedent_str = "、".join(sorted(rule['antecedent']))
                consequent_str = "、".join(sorted(rule['consequent']))
                f.write(f"{idx}. [{antecedent_str}] => [{consequent_str}] - "
                       f"置信度: {rule['confidence']:.2%}\n")
    print(f"已生成摘要文件: {summary_filename}")
    
    # 计算运行时间
    end_time = time.time()
    elapsed_time = end_time - start_time
    if elapsed_time < 60:
        time_str = f"{elapsed_time:.2f}秒"
    elif elapsed_time < 3600:
        minutes = int(elapsed_time // 60)
        seconds = elapsed_time % 60
        time_str = f"{minutes}分{seconds:.2f}秒"
    else:
        hours = int(elapsed_time // 3600)
        minutes = int((elapsed_time % 3600) // 60)
        seconds = elapsed_time % 60
        time_str = f"{hours}小时{minutes}分{seconds:.2f}秒"
    
    print("\n" + "="*80)
    print("DBLP合著者关系挖掘完成！")
    print(f"数据统计: 有效论文数={total_papers}, 涉及作者数={total_authors}")
    print(f"运行时间: {time_str}")
    print("="*80 + "\n")

    with open(summary_filename, 'a', encoding='utf-8') as f:
        f.write(f"\n运行时间: {time_str}\n")


if __name__ == '__main__':
    # Run the survey-CSV mining first, then the DBLP co-author mining.
    apriori_mining()
    apriori_mining_authors()
