from collections import Counter
from pprint import pprint

"""
# 提取数字
text_lines = ["Price: $100", "Weight: 2.5kg", "Count: 25 items", ""]
numbers = [
    float("".join(c for c in line if c.isdigit() or c == "."))
    for line in text_lines
    if any(c.isdigit() for c in line)
]
numbers1 = [
    list(c for c in line if c.isdigit() or c == ".")
    for line in text_lines
    if any(c.isdigit() for c in line)
]
print(numbers)  # [100.0, 2.5, 25.0]
print(numbers1)  # [100.0, 2.5, 25.0]

line = " Price: $100   "
print({i: line for i, line in enumerate(text_lines) if line.strip()})

import timeit


def test_performance():
    n = 10000

    # 列表推导式
    list_comp_time = timeit.timeit(
        "[x**2 for x in range(n)]", globals={"n": n}, number=1000
    )

    # 传统 for 循环
    for_loop_time = timeit.timeit(
        "result = []\nfor x in range(n):\n    result.append(x**2)",
        globals={"n": n},
        number=1000,
    )

    # 生成器表达式
    gen_expr_time = timeit.timeit(
        "list(x**2 for x in range(n))", globals={"n": n}, number=1000
    )

    print(f"列表推导式: {list_comp_time:.4f}秒")
    print(f"传统循环: {for_loop_time:.4f}秒")
    print(f"生成器表达式: {gen_expr_time:.4f}秒")


test_performance()



def data_processing_pipeline(data):
    "多步骤数据处理"
    # 步骤1: 清理和验证
    cleaned = [item.strip().lower() for item in data if item.strip()]

    # 步骤2: 过滤和转换
    processed = [
        f"processed_{item}"
        for item in cleaned
        if len(item) > 3 and not item.startswith("test")
    ]

    # 步骤3: 创建查找字典
    lookup = {item: len(item) for item in processed}

    return cleaned, processed, lookup


# 使用管道
raw_data = ["  Hello  ", "  test_data  ", "WORLD  ", "  py  ", "  "]
cleaned, processed, lookup = data_processing_pipeline(raw_data)

print("Cleaned:", cleaned)  # ['hello', 'test_data', 'world', 'py']
print("Processed:", processed)  # ['processed_hello', 'processed_world']
print("Lookup:", lookup)  # {'processed_hello': 15, 'processed_world': 15}




def analyze_sales_data(sales_records):
    "分析销售数据"

    # 数据清理：筛选出有效的记录
    valid_records = [
        record
        for record in sales_records
        if record.get("amount", 0) > 0 and record.get("product")
    ]

    # 创建按产品分组的销售总额字典
    sales_by_product = {}
    for record in valid_records:
        product = record["product"]
        amount = record["amount"]
        sales_by_product[product] = sales_by_product.get(product, 0) + amount

    # 找出高销售额的产品
    high_sales_products = [
        product for product, total in sales_by_product.items() if total > 1000
    ]

    # 创建销售报告
    sales_report = {
        product: {
            "total_sales": total,
            "average_sale": total
            / len([r for r in valid_records if r["product"] == product]),
            "is_high_sales": total > 1000,
        }
        for product, total in sales_by_product.items()
    }

    return {
        "total_records": len(valid_records),
        "high_sales_products": high_sales_products,
        "sales_report": sales_report,
    }


# 示例数据
sample_sales = [
    {"product": "A", "amount": 100},
    {"product": "B", "amount": 200},
    {"product": "A", "amount": 150},
    {"product": "C", "amount": 300},
    {"product": "B", "amount": 250},
    {"product": "A", "amount": 1200},  # 高销售额
]

# 分析结果
result = analyze_sales_data(sample_sales)
pprint(result)
"""


def process_text_data(texts):
    """Clean a list of text strings and compute simple keyword statistics.

    Args:
        texts: Iterable of strings; blank/whitespace-only entries are dropped.

    Returns:
        dict with four keys:
            "cleaned_texts": stripped, lower-cased non-empty texts (input order)
            "keywords": every word longer than 3 chars, in order of appearance
            "word_frequency": mapping word -> occurrence count
            "high_frequency_words": words appearing more than once
    """

    # 1. Normalize: strip whitespace, lower-case, drop empty lines.
    cleaned_texts = [text.strip().lower() for text in texts if text.strip()]

    # 2. Extract keywords: words longer than 3 characters, preserving order.
    keywords = [
        word for text in cleaned_texts for word in text.split() if len(word) > 3
    ]

    # 3. Count frequencies in a single O(n) pass.
    # (The previous version called keywords.count(word) per unique word,
    # which rescanned the whole list each time — O(n * unique words).)
    word_freq = dict(Counter(keywords))

    # 4. Words that occur more than once.
    high_freq_words = [word for word, freq in word_freq.items() if freq > 1]

    return {
        "cleaned_texts": cleaned_texts,
        "keywords": keywords,
        "word_frequency": word_freq,
        "high_frequency_words": high_freq_words,
    }


# Demo: run the keyword-statistics pipeline on a small sample corpus
# and pretty-print the resulting report dict.
texts = [
    "Python is great for data analysis",
    "Data analysis with Python is powerful",
    "Python programming is fun and useful",
]

result = process_text_data(texts)
pprint(result)