import argparse
from sentence_transformers import SentenceTransformer, util
from concurrent.futures import ThreadPoolExecutor, as_completed

def parse_args():
    """Build and parse the command-line arguments for the title filter."""
    p = argparse.ArgumentParser(description='使用BERT模型筛选AI相关标题')
    p.add_argument('--input', '-i',
                   default='scrapy/all_data/all_titles.txt',
                   help='输入标题文件路径 (默认: scrapy/all_data/all_titles.txt)')
    p.add_argument('--output', '-o',
                   default='filter/init_filter/ai_titles_v1.txt',
                   help='输出文件路径 (默认: filter/init_filter/ai_titles_v1.txt)')
    p.add_argument('--threshold', '-t',
                   type=float,
                   default=0.3,
                   help='相似度阈值 (默认: 0.3)')
    p.add_argument('--workers', '-w',
                   type=int,
                   default=8,
                   help='线程数 (默认: 8)')
    p.add_argument('--model', '-m',
                   default='paraphrase-multilingual-MiniLM-L12-v2',
                   help='使用的SentenceTransformer模型 (默认: paraphrase-multilingual-MiniLM-L12-v2)')
    return p.parse_args()

def compute_similarity(model, ai_vec, title):
    """Encode one title and return (title, best cosine similarity vs ai_vec)."""
    emb = model.encode(title, convert_to_tensor=True)
    best = util.cos_sim(emb, ai_vec).max().item()
    return title, best

def filter_ai_titles(titles, model_name, threshold=0.3, max_workers=8):
    """Return [(title, sim)] for titles whose best cosine similarity to the
    AI keyword embeddings exceeds ``threshold``.

    Args:
        titles: iterable of title strings.
        model_name: SentenceTransformer model name or path.
        threshold: minimum (exclusive) cosine similarity for a title to be kept.
        max_workers: retained for backward compatibility; encoding is now one
            batched call, which is faster than the previous one-thread-per-title
            scheme (per-title ``encode`` calls are GIL/overhead bound).

    Returns:
        List of (title, similarity) tuples, in input order.
    """
    model = SentenceTransformer(model_name)
    ai_keywords = ["人工智能", "AI", "大模型", "深度学习","机器人","多模态","AI Agent","OpenAI","ChatGPT","AGI"]
    ai_vec = model.encode(ai_keywords, convert_to_tensor=True)

    titles = list(titles)
    if not titles:  # nothing to encode or score
        return []

    # Single batched encode: the model vectorizes the whole list internally,
    # replacing the old thread pool that encoded one title per task.
    title_vecs = model.encode(titles, convert_to_tensor=True)
    # sims[i] = best similarity of title i against any keyword embedding.
    sims = util.cos_sim(title_vecs, ai_vec).max(dim=1).values
    return [(t, s.item()) for t, s in zip(titles, sims) if s.item() > threshold]

def main():
    """Entry point: read titles, filter the AI-related ones, write results."""
    args = parse_args()

    # Load the candidate titles, one per line, skipping blanks.
    try:
        with open(args.input, 'r', encoding='utf-8') as fh:
            titles = [ln.strip() for ln in fh if ln.strip()]
        print(f"共读取到{len(titles)}个标题")
    except FileNotFoundError:
        print(f"错误：找不到输入文件 {args.input}")
        return
    except Exception as e:
        print(f"读取文件时发生错误：{e}")
        return

    # Score every title against the AI keyword set.
    try:
        filtered_titles = filter_ai_titles(
            titles,
            model_name=args.model,
            threshold=args.threshold,
            max_workers=args.workers,
        )
    except Exception as e:
        print(f"处理标题时发生错误：{e}")
        return

    # Show the matches together with their similarity scores.
    print("与人工智能相关的标题：")
    for title, sim in filtered_titles:
        print(f"{title}（相似度：{sim:.2f}）")

    # Persist only the titles; the scores are display-only.
    try:
        with open(args.output, 'w', encoding='utf-8') as fh:
            fh.writelines(f"{title}\n" for title, _ in filtered_titles)
        print(f"筛选后的AI相关标题已写入 {args.output}")
        print(f"共筛选出 {len(filtered_titles)} 个标题")
    except Exception as e:
        print(f"写入文件时发生错误：{e}")

if __name__ == "__main__":
    main()

# NOTE(review): the original line fused `main()` and the next section's
# `import argparse` into one statement — a syntax error. Split apart here.
import argparse
import logging
import time
from sentence_transformers import SentenceTransformer, util
from concurrent.futures import ThreadPoolExecutor, as_completed

# 配置日志记录
def setup_logging():
    """Configure root logging to a UTF-8 file plus the console and return
    this module's logger."""
    handlers = [
        logging.FileHandler('ai_titles_filter.log', encoding='utf-8'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
        handlers=handlers,
    )
    return logging.getLogger(__name__)

def parse_args():
    """Build and parse the command-line arguments for the title filter."""
    p = argparse.ArgumentParser(description='使用BERT模型筛选AI相关标题')
    p.add_argument('--input', '-i',
                   default='scrapy/all_data/all_titles.txt',
                   help='输入标题文件路径 (默认: scrapy/all_data/all_titles.txt)')
    p.add_argument('--output', '-o',
                   default='filter/init_filter/ai_titles_v1.txt',
                   help='输出文件路径 (默认: filter/init_filter/ai_titles_v1.txt)')
    p.add_argument('--threshold', '-t',
                   type=float,
                   default=0.3,
                   help='相似度阈值 (默认: 0.3)')
    p.add_argument('--workers', '-w',
                   type=int,
                   default=8,
                   help='线程数 (默认: 8)')
    p.add_argument('--model', '-m',
                   default='paraphrase-multilingual-MiniLM-L12-v2',
                   help='使用的SentenceTransformer模型 (默认: paraphrase-multilingual-MiniLM-L12-v2)')
    p.add_argument('--log-level',
                   choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                   default='INFO',
                   help='日志级别 (默认: INFO)')
    return p.parse_args()

def compute_similarity(model, ai_vec, title):
    """Encode one title and return (title, best cosine similarity vs ai_vec)."""
    emb = model.encode(title, convert_to_tensor=True)
    best = util.cos_sim(emb, ai_vec).max().item()
    return title, best

def filter_ai_titles(titles, model_name, threshold=0.3, max_workers=8, logger=None):
    """Return [(title, sim)] for titles whose best cosine similarity against
    the AI keyword embeddings exceeds ``threshold``.

    Args:
        titles: iterable of title strings.
        model_name: SentenceTransformer model name or path.
        threshold: minimum (exclusive) similarity for a title to be kept.
        max_workers: retained for backward compatibility; titles are now
            encoded in a single batched call, which is faster than the old
            one-thread-per-title scheme (per-title ``encode`` is overhead bound).
        logger: optional logging.Logger for progress messages.

    Returns:
        List of (title, similarity) tuples, in input order.
    """
    start_time = time.time()
    if logger:
        logger.info(f"开始加载模型: {model_name}")

    model = SentenceTransformer(model_name)

    if logger:
        logger.info("模型加载完成")
        logger.info("开始编码AI关键词")

    ai_keywords = ["人工智能", "AI", "大模型", "深度学习","机器人","多模态","AI Agent","OpenAI","ChatGPT","AGI"]
    ai_vec = model.encode(ai_keywords, convert_to_tensor=True)

    titles = list(titles)
    if logger:
        logger.info(f"开始处理 {len(titles)} 个标题")

    if not titles:  # nothing to encode or score
        filtered = []
    else:
        # Single batched encode: the model vectorizes the whole list
        # internally, replacing the old thread pool over individual titles.
        title_vecs = model.encode(titles, convert_to_tensor=True)
        # Best similarity of each title against any keyword embedding.
        sims = util.cos_sim(title_vecs, ai_vec).max(dim=1).values
        filtered = [(t, s.item()) for t, s in zip(titles, sims) if s.item() > threshold]

    if logger:
        elapsed_time = time.time() - start_time
        logger.info(f"处理完成，耗时 {elapsed_time:.2f} 秒")
        logger.info(f"找到 {len(filtered)} 个AI相关标题，平均相似度阈值: {threshold}")

    return filtered

def main():
    """Entry point: parse CLI args, set up logging, read the input titles,
    filter the AI-related ones, and write them to the output file."""
    # Parse command-line arguments
    args = parse_args()

    # Configure logging, then apply the level chosen on the command line
    logger = setup_logging()
    logger.setLevel(getattr(logging, args.log_level))

    # Record the effective configuration for traceability
    logger.info("开始执行AI标题筛选程序")
    logger.info(f"输入文件: {args.input}")
    logger.info(f"输出文件: {args.output}")
    logger.info(f"相似度阈值: {args.threshold}")
    logger.info(f"线程数: {args.workers}")
    logger.info(f"使用模型: {args.model}")

    # Read titles (one per line, blank lines skipped)
    try:
        logger.info("开始读取标题文件")
        with open(args.input, 'r', encoding='utf-8') as f:
            titles = [line.strip() for line in f if line.strip()]
        logger.info(f"共读取到 {len(titles)} 个标题")
    except FileNotFoundError:
        logger.error(f"找不到输入文件 {args.input}")
        print(f"错误：找不到输入文件 {args.input}")
        return
    except Exception as e:
        logger.error(f"读取文件时发生错误：{e}")
        print(f"读取文件时发生错误：{e}")
        return

    # Filter AI-related titles
    try:
        logger.info("开始筛选AI相关标题")
        filtered_titles = filter_ai_titles(
            titles, 
            model_name=args.model,
            threshold=args.threshold, 
            max_workers=args.workers,
            logger=logger
        )
    except Exception as e:
        logger.error(f"处理标题时发生错误：{e}")
        print(f"处理标题时发生错误：{e}")
        return

    # Print the matches together with their similarity scores
    logger.info("筛选完成，准备输出结果")
    print("与人工智能相关的标题：")
    for title, sim in filtered_titles:
        print(f"{title}（相似度：{sim:.2f}）")

    # Write results to the output file (titles only; scores are display-only)
    try:
        logger.info(f"开始写入结果到文件: {args.output}")
        with open(args.output, 'w', encoding='utf-8') as f:
            for title, sim in filtered_titles:
                f.write(f"{title}\n")
        logger.info(f"筛选后的AI相关标题已写入 {args.output}")
        logger.info(f"共筛选出 {len(filtered_titles)} 个标题")
        print(f"筛选后的AI相关标题已写入 {args.output}")
        print(f"共筛选出 {len(filtered_titles)} 个标题")
    except Exception as e:
        logger.error(f"写入文件时发生错误：{e}")
        print(f"写入文件时发生错误：{e}")

if __name__ == "__main__":
    main()

# NOTE(review): the original line fused `main()` and the next section's
# `import argparse` into one statement — a syntax error. Split apart here.
import argparse
import logging
import time
from sentence_transformers import SentenceTransformer, util
from concurrent.futures import ThreadPoolExecutor, as_completed

# 配置日志记录
def setup_logging():
    """Configure root logging to a UTF-8 file plus the console and return
    this module's logger."""
    handlers = [
        logging.FileHandler('ai_titles_filter.log', encoding='utf-8'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
        handlers=handlers,
    )
    return logging.getLogger(__name__)

def parse_args():
    """Build and parse the command-line arguments for the title filter."""
    p = argparse.ArgumentParser(description='使用BERT模型筛选AI相关标题')
    p.add_argument('--input', '-i',
                   default='scrapy/all_data/all_titles.txt',
                   help='输入标题文件路径 (默认: scrapy/all_data/all_titles.txt)')
    p.add_argument('--output', '-o',
                   default='filter/init_filter/ai_titles_v1.txt',
                   help='输出文件路径 (默认: filter/init_filter/ai_titles_v1.txt)')
    p.add_argument('--threshold', '-t',
                   type=float,
                   default=0.3,
                   help='相似度阈值 (默认: 0.3)')
    p.add_argument('--workers', '-w',
                   type=int,
                   default=8,
                   help='线程数 (默认: 8)')
    p.add_argument('--model', '-m',
                   default='paraphrase-multilingual-MiniLM-L12-v2',
                   help='使用的SentenceTransformer模型 (默认: paraphrase-multilingual-MiniLM-L12-v2)')
    p.add_argument('--log-level',
                   choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                   default='INFO',
                   help='日志级别 (默认: INFO)')
    return p.parse_args()

def compute_similarity(model, ai_vec, title):
    """Encode one title and return (title, best cosine similarity vs ai_vec)."""
    emb = model.encode(title, convert_to_tensor=True)
    best = util.cos_sim(emb, ai_vec).max().item()
    return title, best

def filter_ai_titles(titles, model_name, threshold=0.3, max_workers=8, logger=None):
    """Return [(title, sim)] for titles whose best cosine similarity against
    the AI keyword embeddings exceeds ``threshold``.

    Args:
        titles: iterable of title strings.
        model_name: SentenceTransformer model name or path.
        threshold: minimum (exclusive) similarity for a title to be kept.
        max_workers: retained for backward compatibility; titles are now
            encoded in a single batched call, which is faster than the old
            one-thread-per-title scheme (per-title ``encode`` is overhead bound).
        logger: optional logging.Logger for progress messages.

    Returns:
        List of (title, similarity) tuples, in input order.
    """
    start_time = time.time()
    if logger:
        logger.info(f"开始加载模型: {model_name}")

    model = SentenceTransformer(model_name)

    if logger:
        logger.info("模型加载完成")
        logger.info("开始编码AI关键词")

    ai_keywords = ["人工智能", "AI", "大模型", "深度学习","机器人","多模态","AI Agent","OpenAI","ChatGPT","AGI"]
    ai_vec = model.encode(ai_keywords, convert_to_tensor=True)

    titles = list(titles)
    if logger:
        logger.info(f"开始处理 {len(titles)} 个标题")

    if not titles:  # nothing to encode or score
        filtered = []
    else:
        # Single batched encode: the model vectorizes the whole list
        # internally, replacing the old thread pool over individual titles.
        title_vecs = model.encode(titles, convert_to_tensor=True)
        # Best similarity of each title against any keyword embedding.
        sims = util.cos_sim(title_vecs, ai_vec).max(dim=1).values
        filtered = [(t, s.item()) for t, s in zip(titles, sims) if s.item() > threshold]

    if logger:
        elapsed_time = time.time() - start_time
        logger.info(f"处理完成，耗时 {elapsed_time:.2f} 秒")
        logger.info(f"找到 {len(filtered)} 个AI相关标题，平均相似度阈值: {threshold}")

    return filtered

def main():
    """Entry point: parse CLI args, set up logging, read the input titles,
    filter the AI-related ones, and write them to the output file."""
    # Parse command-line arguments
    args = parse_args()

    # Configure logging, then apply the level chosen on the command line
    logger = setup_logging()
    logger.setLevel(getattr(logging, args.log_level))

    # Record the effective configuration for traceability
    logger.info("开始执行AI标题筛选程序")
    logger.info(f"输入文件: {args.input}")
    logger.info(f"输出文件: {args.output}")
    logger.info(f"相似度阈值: {args.threshold}")
    logger.info(f"线程数: {args.workers}")
    logger.info(f"使用模型: {args.model}")

    # Read titles (one per line, blank lines skipped)
    try:
        logger.info("开始读取标题文件")
        with open(args.input, 'r', encoding='utf-8') as f:
            titles = [line.strip() for line in f if line.strip()]
        logger.info(f"共读取到 {len(titles)} 个标题")
    except FileNotFoundError:
        logger.error(f"找不到输入文件 {args.input}")
        print(f"错误：找不到输入文件 {args.input}")
        return
    except Exception as e:
        logger.error(f"读取文件时发生错误：{e}")
        print(f"读取文件时发生错误：{e}")
        return

    # Filter AI-related titles
    try:
        logger.info("开始筛选AI相关标题")
        filtered_titles = filter_ai_titles(
            titles, 
            model_name=args.model,
            threshold=args.threshold, 
            max_workers=args.workers,
            logger=logger
        )
    except Exception as e:
        logger.error(f"处理标题时发生错误：{e}")
        print(f"处理标题时发生错误：{e}")
        return

    # Print the matches together with their similarity scores
    logger.info("筛选完成，准备输出结果")
    print("与人工智能相关的标题：")
    for title, sim in filtered_titles:
        print(f"{title}（相似度：{sim:.2f}）")

    # Write results to the output file (titles only; scores are display-only)
    try:
        logger.info(f"开始写入结果到文件: {args.output}")
        with open(args.output, 'w', encoding='utf-8') as f:
            for title, sim in filtered_titles:
                f.write(f"{title}\n")
        logger.info(f"筛选后的AI相关标题已写入 {args.output}")
        logger.info(f"共筛选出 {len(filtered_titles)} 个标题")
        print(f"筛选后的AI相关标题已写入 {args.output}")
        print(f"共筛选出 {len(filtered_titles)} 个标题")
    except Exception as e:
        logger.error(f"写入文件时发生错误：{e}")
        print(f"写入文件时发生错误：{e}")

if __name__ == "__main__":
    main()

# NOTE(review): the original line fused `main()` and the next section's
# import into one statement — a syntax error. Split apart here.
from sentence_transformers import SentenceTransformer, util
from concurrent.futures import ThreadPoolExecutor, as_completed

# Load the scraped titles, one per line, skipping blank lines.
with open('scrapy/all_data/all_titles.txt', 'r', encoding='utf-8') as fh:
    titles = [ln.strip() for ln in fh if ln.strip()]

print(f"共读取到{len(titles)}个标题")

def compute_similarity(model, ai_vec, title):
    """Encode one title and return (title, best cosine similarity vs ai_vec)."""
    emb = model.encode(title, convert_to_tensor=True)
    best = util.cos_sim(emb, ai_vec).max().item()
    return title, best

def filter_ai_titles(titles, threshold=0.3, max_workers=8,
                     model_name='paraphrase-multilingual-MiniLM-L12-v2'):
    """Return [(title, sim)] for titles whose best cosine similarity to the
    AI keyword embeddings exceeds ``threshold``.

    Args:
        titles: iterable of title strings.
        threshold: minimum (exclusive) similarity for a title to be kept.
        max_workers: retained for backward compatibility; encoding is now one
            batched call rather than one thread per title.
        model_name: SentenceTransformer model to load; defaults to the value
            that was previously hard-coded, so existing callers are unaffected.

    Returns:
        List of (title, similarity) tuples, in input order.
    """
    model = SentenceTransformer(model_name)
    ai_keywords = ["人工智能", "AI", "大模型", "深度学习","机器人","多模态","AI Agent","OpenAI","ChatGPT","AGI"]
    ai_vec = model.encode(ai_keywords, convert_to_tensor=True)

    titles = list(titles)
    if not titles:  # nothing to encode or score
        return []

    # Single batched encode: the model vectorizes the whole list internally,
    # which is faster than the old thread-per-title scheme.
    title_vecs = model.encode(titles, convert_to_tensor=True)
    # Best similarity of each title against any keyword embedding.
    sims = util.cos_sim(title_vecs, ai_vec).max(dim=1).values
    return [(t, s.item()) for t, s in zip(titles, sims) if s.item() > threshold]

# Run the filter with the default parameters and show the matches.
filtered_titles = filter_ai_titles(titles, threshold=0.3, max_workers=8)
print("与人工智能相关的标题：")
for title, sim in filtered_titles:
    print(f"{title}（相似度：{sim:.2f}）")

# Persist only the titles; similarity scores are display-only.
with open('filter/init_filter/ai_titles_v1.txt', 'w', encoding='utf-8') as out:
    out.writelines(f"{title}\n" for title, _ in filtered_titles)
print("筛选后的AI相关标题已写入")
import argparse
import logging
import time
from sentence_transformers import SentenceTransformer, util
from concurrent.futures import ThreadPoolExecutor, as_completed

# 配置日志记录
def setup_logging():
    """Configure root logging to a UTF-8 file plus the console and return
    this module's logger."""
    handlers = [
        logging.FileHandler('ai_titles_filter.log', encoding='utf-8'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
        handlers=handlers,
    )
    return logging.getLogger(__name__)

def parse_args():
    """Build and parse the command-line arguments for the title filter."""
    p = argparse.ArgumentParser(description='使用BERT模型筛选AI相关标题')
    p.add_argument('--input', '-i',
                   default='scrapy/all_data/all_titles.txt',
                   help='输入标题文件路径 (默认: scrapy/all_data/all_titles.txt)')
    p.add_argument('--output', '-o',
                   default='filter/init_filter/ai_titles_v1.txt',
                   help='输出文件路径 (默认: filter/init_filter/ai_titles_v1.txt)')
    p.add_argument('--threshold', '-t',
                   type=float,
                   default=0.3,
                   help='相似度阈值 (默认: 0.3)')
    p.add_argument('--workers', '-w',
                   type=int,
                   default=8,
                   help='线程数 (默认: 8)')
    p.add_argument('--model', '-m',
                   default='paraphrase-multilingual-MiniLM-L12-v2',
                   help='使用的SentenceTransformer模型 (默认: paraphrase-multilingual-MiniLM-L12-v2)')
    p.add_argument('--log-level',
                   choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                   default='INFO',
                   help='日志级别 (默认: INFO)')
    return p.parse_args()

def compute_similarity(model, ai_vec, title):
    """Encode one title and return (title, best cosine similarity vs ai_vec)."""
    emb = model.encode(title, convert_to_tensor=True)
    best = util.cos_sim(emb, ai_vec).max().item()
    return title, best

def filter_ai_titles(titles, model_name, threshold=0.3, max_workers=8, logger=None):
    """Return [(title, sim)] for titles whose best cosine similarity against
    the AI keyword embeddings exceeds ``threshold``.

    Args:
        titles: iterable of title strings.
        model_name: SentenceTransformer model name or path.
        threshold: minimum (exclusive) similarity for a title to be kept.
        max_workers: retained for backward compatibility; titles are now
            encoded in a single batched call, which is faster than the old
            one-thread-per-title scheme (per-title ``encode`` is overhead bound).
        logger: optional logging.Logger for progress messages.

    Returns:
        List of (title, similarity) tuples, in input order.
    """
    start_time = time.time()
    if logger:
        logger.info(f"开始加载模型: {model_name}")

    model = SentenceTransformer(model_name)

    if logger:
        logger.info("模型加载完成")
        logger.info("开始编码AI关键词")

    ai_keywords = ["人工智能", "AI", "大模型", "深度学习","机器人","多模态","AI Agent","OpenAI","ChatGPT","AGI"]
    ai_vec = model.encode(ai_keywords, convert_to_tensor=True)

    titles = list(titles)
    if logger:
        logger.info(f"开始处理 {len(titles)} 个标题")

    if not titles:  # nothing to encode or score
        filtered = []
    else:
        # Single batched encode: the model vectorizes the whole list
        # internally, replacing the old thread pool over individual titles.
        title_vecs = model.encode(titles, convert_to_tensor=True)
        # Best similarity of each title against any keyword embedding.
        sims = util.cos_sim(title_vecs, ai_vec).max(dim=1).values
        filtered = [(t, s.item()) for t, s in zip(titles, sims) if s.item() > threshold]

    if logger:
        elapsed_time = time.time() - start_time
        logger.info(f"处理完成，耗时 {elapsed_time:.2f} 秒")
        logger.info(f"找到 {len(filtered)} 个AI相关标题，平均相似度阈值: {threshold}")

    return filtered

def main():
    """Entry point: parse CLI args, set up logging, read the input titles,
    filter the AI-related ones, and write them to the output file."""
    # Parse command-line arguments
    args = parse_args()

    # Configure logging, then apply the level chosen on the command line
    logger = setup_logging()
    logger.setLevel(getattr(logging, args.log_level))

    # Record the effective configuration for traceability
    logger.info("开始执行AI标题筛选程序")
    logger.info(f"输入文件: {args.input}")
    logger.info(f"输出文件: {args.output}")
    logger.info(f"相似度阈值: {args.threshold}")
    logger.info(f"线程数: {args.workers}")
    logger.info(f"使用模型: {args.model}")

    # Read titles (one per line, blank lines skipped)
    try:
        logger.info("开始读取标题文件")
        with open(args.input, 'r', encoding='utf-8') as f:
            titles = [line.strip() for line in f if line.strip()]
        logger.info(f"共读取到 {len(titles)} 个标题")
    except FileNotFoundError:
        logger.error(f"找不到输入文件 {args.input}")
        print(f"错误：找不到输入文件 {args.input}")
        return
    except Exception as e:
        logger.error(f"读取文件时发生错误：{e}")
        print(f"读取文件时发生错误：{e}")
        return

    # Filter AI-related titles
    try:
        logger.info("开始筛选AI相关标题")
        filtered_titles = filter_ai_titles(
            titles, 
            model_name=args.model,
            threshold=args.threshold, 
            max_workers=args.workers,
            logger=logger
        )
    except Exception as e:
        logger.error(f"处理标题时发生错误：{e}")
        print(f"处理标题时发生错误：{e}")
        return

    # Print the matches together with their similarity scores
    logger.info("筛选完成，准备输出结果")
    print("与人工智能相关的标题：")
    for title, sim in filtered_titles:
        print(f"{title}（相似度：{sim:.2f}）")

    # Write results to the output file (titles only; scores are display-only)
    try:
        logger.info(f"开始写入结果到文件: {args.output}")
        with open(args.output, 'w', encoding='utf-8') as f:
            for title, sim in filtered_titles:
                f.write(f"{title}\n")
        logger.info(f"筛选后的AI相关标题已写入 {args.output}")
        logger.info(f"共筛选出 {len(filtered_titles)} 个标题")
        print(f"筛选后的AI相关标题已写入 {args.output}")
        print(f"共筛选出 {len(filtered_titles)} 个标题")
    except Exception as e:
        logger.error(f"写入文件时发生错误：{e}")
        print(f"写入文件时发生错误：{e}")

if __name__ == "__main__":
    main()

# NOTE(review): `import argparse` was indented inside the __main__ guard,
# so the next section's import only ran when executed as a script. Moved
# to module level.
import argparse
import logging
import time
from sentence_transformers import SentenceTransformer, util
from concurrent.futures import ThreadPoolExecutor, as_completed

# 配置日志记录
def setup_logging():
    """Configure root logging to a UTF-8 file plus the console and return
    this module's logger."""
    handlers = [
        logging.FileHandler('ai_titles_filter.log', encoding='utf-8'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
        handlers=handlers,
    )
    return logging.getLogger(__name__)

def parse_args():
    """Build and parse the command-line arguments for the title filter."""
    p = argparse.ArgumentParser(description='使用BERT模型筛选AI相关标题')
    p.add_argument('--input', '-i',
                   default='scrapy/all_data/all_titles.txt',
                   help='输入标题文件路径 (默认: scrapy/all_data/all_titles.txt)')
    p.add_argument('--output', '-o',
                   default='filter/init_filter/ai_titles_v1.txt',
                   help='输出文件路径 (默认: filter/init_filter/ai_titles_v1.txt)')
    p.add_argument('--threshold', '-t',
                   type=float,
                   default=0.3,
                   help='相似度阈值 (默认: 0.3)')
    p.add_argument('--workers', '-w',
                   type=int,
                   default=8,
                   help='线程数 (默认: 8)')
    p.add_argument('--model', '-m',
                   default='paraphrase-multilingual-MiniLM-L12-v2',
                   help='使用的SentenceTransformer模型 (默认: paraphrase-multilingual-MiniLM-L12-v2)')
    p.add_argument('--log-level',
                   choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                   default='INFO',
                   help='日志级别 (默认: INFO)')
    p.add_argument('--list-models',
                   action='store_true',
                   help='列出支持的模型')
    return p.parse_args()

# Predefined model aliases: short keys accepted by --model, each mapped to
# a full SentenceTransformer model name (resolved by get_model_name()).
SUPPORTED_MODELS = {
    'default': 'paraphrase-multilingual-MiniLM-L12-v2',
    'multilingual': 'paraphrase-multilingual-MiniLM-L12-v2',
    'chinese': 'bert-base-chinese',
    'english': 'all-MiniLM-L6-v2',
    'large': 'paraphrase-multilingual-mpnet-base-v2',
    'distiluse': 'distiluse-base-multilingual-cased-v2'
}

def list_supported_models():
    """Print the built-in model aliases and their full model names."""
    divider = "-" * 50
    print("支持的模型:")
    print(divider)
    for alias, full_name in SUPPORTED_MODELS.items():
        print(f"{alias:15} : {full_name}")
    print(divider)
    print("也可以直接指定HuggingFace上的任何SentenceTransformer模型")

def get_model_name(model_arg):
    """Resolve a model alias to its full name; unknown values pass through
    unchanged (treated as literal HuggingFace model identifiers)."""
    return SUPPORTED_MODELS.get(model_arg, model_arg)

def compute_similarity(model, ai_vec, title):
    """Encode one title and return (title, best cosine similarity vs ai_vec)."""
    emb = model.encode(title, convert_to_tensor=True)
    best = util.cos_sim(emb, ai_vec).max().item()
    return title, best

def filter_ai_titles(titles, model_name, threshold=0.3, max_workers=8, logger=None):
    """Return [(title, sim)] for titles whose best cosine similarity against
    the AI keyword embeddings exceeds ``threshold``.

    Args:
        titles: iterable of title strings.
        model_name: SentenceTransformer model name or path.
        threshold: minimum (exclusive) similarity for a title to be kept.
        max_workers: retained for backward compatibility; titles are now
            encoded in a single batched call, which is faster than the old
            one-thread-per-title scheme (per-title ``encode`` is overhead bound).
        logger: optional logging.Logger for progress messages.

    Returns:
        List of (title, similarity) tuples, in input order.
    """
    start_time = time.time()
    if logger:
        logger.info(f"开始加载模型: {model_name}")

    model = SentenceTransformer(model_name)

    if logger:
        logger.info("模型加载完成")
        logger.info("开始编码AI关键词")

    ai_keywords = ["人工智能", "AI", "大模型", "深度学习","机器人","多模态","AI Agent","OpenAI","ChatGPT","AGI"]
    ai_vec = model.encode(ai_keywords, convert_to_tensor=True)

    titles = list(titles)
    if logger:
        logger.info(f"开始处理 {len(titles)} 个标题")

    if not titles:  # nothing to encode or score
        filtered = []
    else:
        # Single batched encode: the model vectorizes the whole list
        # internally, replacing the old thread pool over individual titles.
        title_vecs = model.encode(titles, convert_to_tensor=True)
        # Best similarity of each title against any keyword embedding.
        sims = util.cos_sim(title_vecs, ai_vec).max(dim=1).values
        filtered = [(t, s.item()) for t, s in zip(titles, sims) if s.item() > threshold]

    if logger:
        elapsed_time = time.time() - start_time
        logger.info(f"处理完成，耗时 {elapsed_time:.2f} 秒")
        logger.info(f"找到 {len(filtered)} 个AI相关标题，平均相似度阈值: {threshold}")

    return filtered

def main():
    """Entry point: resolve the model alias, set up logging, read the input
    titles, filter the AI-related ones, and write them to the output file."""
    # Parse command-line arguments
    args = parse_args()

    # If the user only wants the list of supported model aliases, print it and exit
    if args.list_models:
        list_supported_models()
        return

    # Configure logging, then apply the level chosen on the command line
    logger = setup_logging()
    logger.setLevel(getattr(logging, args.log_level))

    # Map a possible alias (e.g. 'chinese') to the actual model name
    model_name = get_model_name(args.model)

    # Record the effective configuration for traceability
    logger.info("开始执行AI标题筛选程序")
    logger.info(f"输入文件: {args.input}")
    logger.info(f"输出文件: {args.output}")
    logger.info(f"相似度阈值: {args.threshold}")
    logger.info(f"线程数: {args.workers}")
    logger.info(f"使用模型别名: {args.model}")
    logger.info(f"实际使用模型: {model_name}")

    # Read titles (one per line, blank lines skipped)
    try:
        logger.info("开始读取标题文件")
        with open(args.input, 'r', encoding='utf-8') as f:
            titles = [line.strip() for line in f if line.strip()]
        logger.info(f"共读取到 {len(titles)} 个标题")
    except FileNotFoundError:
        logger.error(f"找不到输入文件 {args.input}")
        print(f"错误：找不到输入文件 {args.input}")
        return
    except Exception as e:
        logger.error(f"读取文件时发生错误：{e}")
        print(f"读取文件时发生错误：{e}")
        return

    # Filter AI-related titles
    try:
        logger.info("开始筛选AI相关标题")
        filtered_titles = filter_ai_titles(
            titles, 
            model_name=model_name,
            threshold=args.threshold, 
            max_workers=args.workers,
            logger=logger
        )
    except Exception as e:
        logger.error(f"处理标题时发生错误：{e}")
        print(f"处理标题时发生错误：{e}")
        return

    # Print the matches together with their similarity scores
    logger.info("筛选完成，准备输出结果")
    print("与人工智能相关的标题：")
    for title, sim in filtered_titles:
        print(f"{title}（相似度：{sim:.2f}）")

    # Write results to the output file (titles only; scores are display-only)
    try:
        logger.info(f"开始写入结果到文件: {args.output}")
        with open(args.output, 'w', encoding='utf-8') as f:
            for title, sim in filtered_titles:
                f.write(f"{title}\n")
        logger.info(f"筛选后的AI相关标题已写入 {args.output}")
        logger.info(f"共筛选出 {len(filtered_titles)} 个标题")
        print(f"筛选后的AI相关标题已写入 {args.output}")
        print(f"共筛选出 {len(filtered_titles)} 个标题")
    except Exception as e:
        logger.error(f"写入文件时发生错误：{e}")
        print(f"写入文件时发生错误：{e}")

# Script entry point: run the CLI workflow only when executed directly.
if __name__ == "__main__":
    main()