import csv
import random
from sklearn.metrics import precision_score, recall_score, f1_score
from dedupe import StaticRecordLink

# Trained dedupe settings blob, loaded read-only by StaticRecordLink below.
settings_file = "medicine_matching_settings"
# Source prescriptions CSV; rows are read through their "title" column.
prescpt_file = "prescpt_medicine.csv"

# Generated validation-set files: left/right record tables plus the
# exhaustive (left_id, right_id, match) ground-truth label file.
left_output = "validation_left.csv"
right_output = "validation_right.csv"
truth_output = "validation_ground_truth.csv"

def preProcess(s):
    """Normalize a raw CSV field: trim whitespace and quotes, lowercase.

    Returns None for missing or effectively-empty values so they are
    skipped by the loader.
    """
    if not s:
        return None
    cleaned = s.strip()
    cleaned = cleaned.strip('"')
    cleaned = cleaned.strip("'")
    cleaned = cleaned.lower()
    return cleaned if cleaned else None

# Typo / alias simulator
def generate_alias(title: str) -> "str | None":
    """Simulate a misspelled or aliased variant of *title* for match testing.

    Randomly applies one of three corruptions: replace one character with
    an adjacent Unicode codepoint ("typo"), insert a space, or delete one
    character. Titles too short to corrupt fall back to appending
    "_alias".

    Returns:
        The corrupted string, or None when *title* is empty/None.
    """
    if not title:
        return None
    mode = random.choice(["typo", "insert_space", "delete_char"])
    if mode == "typo" and len(title) > 1:
        pos = random.randint(0, len(title) - 1)
        # The next codepoint simulates a typo; step backwards instead when
        # the increment would land on a surrogate (U+D800-U+DFFF, not
        # encodable to UTF-8) or past the chr() ceiling (U+10FFFF), either
        # of which would crash the downstream CSV writers.
        code = ord(title[pos]) + 1
        if 0xD800 <= code <= 0xDFFF or code > 0x10FFFF:
            code = ord(title[pos]) - 1
        return title[:pos] + chr(code) + title[pos + 1:]
    elif mode == "insert_space" and len(title) > 1:
        pos = random.randint(1, len(title) - 1)
        return title[:pos] + " " + title[pos:]
    elif mode == "delete_char" and len(title) > 1:
        pos = random.randint(0, len(title) - 1)
        return title[:pos] + title[pos + 1:]
    else:
        return f"{title}_alias"

def _write_side_csv(path, id_header, data):
    """Persist one side of the validation set as CSV: (id, title, alias, original_index)."""
    with open(path, "w", newline='', encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow([id_header, "title", "alias", "original_index"])
        for rec_id, rec in data.items():
            writer.writerow([rec_id, rec["title"], rec["alias"], rec["original_index"]])


def build_validation_data(filename, sample_size=30, forced_matches=5):
    """Build a synthetic record-linkage validation set from *filename*.

    Independently samples titles into a left and a right table, injects
    *forced_matches* guaranteed matching pairs (the right side of each
    pair gets a generated alias), and writes the two tables plus an
    exhaustive pairwise ground-truth label file to the module-level
    output paths.

    Args:
        filename: source CSV; rows are read through their "title" column.
        sample_size: rows sampled into each side (clamped to the number
            of usable rows so small files don't raise).
        forced_matches: number of guaranteed matching pairs to inject.

    Returns:
        (left_data, right_data, ground_truth) where the data dicts map
        record ids to {"title", "alias", "original_index"} records and
        ground_truth is a set of matching (left_id, right_id) pairs.
    """
    rows = []
    with open(filename, encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for i, row in enumerate(reader):
            title = preProcess(row.get("title"))
            if title:  # skip rows with missing/empty titles
                rows.append((i, title))

    total_rows = len(rows)
    # Clamp like forced_matches below: random.sample() raises ValueError
    # when asked for more items than the population holds.
    n = min(sample_size, total_rows)
    sampled_left = random.sample(rows, n)
    sampled_right = random.sample(rows, n)
    forced_match_samples = random.sample(rows, min(forced_matches, total_rows))

    left_data, right_data, ground_truth = {}, {}, set()

    for i, (index, title) in enumerate(sampled_left):
        left_data[f"left_{i}"] = {"title": title, "alias": None, "original_index": index}

    for i, (index, title) in enumerate(sampled_right):
        right_data[f"right_{i}"] = {
            "title": title,
            "alias": generate_alias(title),
            "original_index": index,
        }

    # Inject guaranteed matches so recall is measurable even when the two
    # independent samples happen not to overlap.
    for i, (index, title) in enumerate(forced_match_samples):
        lid = f"left_forced_{i}"
        rid = f"right_forced_{i}"
        left_data[lid] = {"title": title, "alias": None, "original_index": index}
        right_data[rid] = {
            "title": title,
            "alias": generate_alias(title),
            "original_index": index,
        }
        ground_truth.add((lid, rid))

    # Any accidental overlap between left and right (same source row) is
    # also a true match.
    for l_id, l_rec in left_data.items():
        for r_id, r_rec in right_data.items():
            if l_rec["original_index"] == r_rec["original_index"]:
                ground_truth.add((l_id, r_id))

    # Persist the validation set as CSV files.
    _write_side_csv(left_output, "left_id", left_data)
    _write_side_csv(right_output, "right_id", right_data)

    with open(truth_output, "w", newline='', encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["left_id", "right_id", "match"])
        for l_id in left_data:
            for r_id in right_data:
                writer.writerow([l_id, r_id, 1 if (l_id, r_id) in ground_truth else 0])

    return left_data, right_data, ground_truth


def evaluate_model(left_data, right_data, ground_truth):
    """Run the trained dedupe linker over the validation tables and print
    precision, recall and F1 against the ground-truth pairs."""
    with open(settings_file, "rb") as sf:
        linker = StaticRecordLink(sf)

    linked_records = linker.join(left_data, right_data, threshold=0.5)

    # Expand each linked cluster into explicit (left_id, right_id) pairs.
    predicted_matches = set()
    for cluster, _score in linked_records:
        lefts = [rec_id for rec_id in cluster if rec_id.startswith("left")]
        rights = [rec_id for rec_id in cluster if rec_id.startswith("right")]
        predicted_matches.update(
            (l_id, r_id) for l_id in lefts for r_id in rights
        )

    # Score over every pair positive in either set; pairs negative in both
    # are true negatives and affect neither precision nor recall.
    all_pairs = list(ground_truth | predicted_matches)
    y_true = [1 if pair in ground_truth else 0 for pair in all_pairs]
    y_pred = [1 if pair in predicted_matches else 0 for pair in all_pairs]

    precision = precision_score(y_true, y_pred, zero_division=0)
    recall = recall_score(y_true, y_pred, zero_division=0)
    f1 = f1_score(y_true, y_pred, zero_division=0)

    print(f"验证样本对总数：{len(all_pairs)}")
    print(f"模型准确率（Precision）: {precision:.4f}")
    print(f"模型召回率（Recall）:    {recall:.4f}")
    print(f"F1 值:                 {f1:.4f}")
    print("验证集已保存为:")
    print(f"- 左表: {left_output}")
    print(f"- 右表: {right_output}")
    print(f"- 标签: {truth_output}")

if __name__ == "__main__":
    # Build the synthetic validation set, then score the trained model on it.
    left_data, right_data, ground_truth = build_validation_data(
        prescpt_file, sample_size=100, forced_matches=100
    )
    evaluate_model(left_data, right_data, ground_truth)
