import csv
import logging
import os
import dedupe
import dedupe.variables
from dedupe import RecordLink
import matplotlib.pyplot as plt
import numpy as np
from kneed import KneeLocator

# Configure logging for the whole script
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# NOTE(review): not referenced anywhere in this file — presumably intended for
# dedupe's num_cores / multiprocessing; confirm before relying on or removing it.
NUM_PROCESSES = 4

left_file = "prescpt_medicine.csv"    # "left" side: prescribed medicines (input CSV)
right_file = "got_medicine.csv"       # "right" side: obtained medicines (input CSV)
output_file = "missing_medicine.csv"  # report of left titles that found no match
settings_file = "medicine_matching_settings"       # serialized dedupe model settings (binary)
training_file = "medicine_matching_training.json"  # labeled training pairs (JSON)


def preProcess(column):
    """Normalize a raw CSV cell for matching.

    Strips surrounding whitespace and quote characters and lowercases the
    text.  Returns None for missing input or when nothing remains after
    cleaning, so empty cells and blank strings are treated uniformly.
    """
    if not column:
        return None
    cleaned = column.strip().strip('"').strip("'").lower()
    if not cleaned:
        return None
    return cleaned


def readData(filename, is_left):
    """Load a medicine CSV into a dict keyed by side-prefixed row index.

    Keys look like "left_0" / "right_3".  Each value holds the cleaned
    "title"; "alias" is only cleaned for right-side files and is forced to
    None on the left side (the prescription file has no usable alias).
    """
    records = {}
    side = "left_" if is_left else "right_"
    with open(filename, encoding="utf-8") as handle:
        for index, raw_row in enumerate(csv.DictReader(handle)):
            if is_left:
                alias = None
            else:
                alias = preProcess(raw_row.get("alias"))
            records[side + str(index)] = {
                "title": preProcess(raw_row.get("title")),
                "alias": alias,
            }
    return records


def threshold_tuning(linker, data_1, data_2):
    """Sweep candidate thresholds, pick the knee of the count curve, and plot.

    For each threshold in [0.3, 0.85] the linker is re-joined and the number
    of matched pairs plus the mean match score are recorded.  KneeLocator
    finds the elbow of the count-vs-threshold curve; 0.8 is used as a
    fallback when no knee is detected.  A diagnostic chart is written to
    threshold_tuning.png.

    Returns:
        float: the chosen threshold.
    """
    thresholds = np.linspace(0.3, 0.85, 60)
    counts = []
    avg_scores = []

    logger.info("阈值调优")
    for t in thresholds:
        # Materialize: join() may return a lazy iterable, and a generator is
        # always truthy, which would make the empty-result branch unreachable.
        pairs = list(linker.join(data_1, data_2, threshold=t))
        if pairs:
            score_list = [score for _, score in pairs]
            counts.append(len(score_list))
            avg_scores.append(np.mean(score_list))
        else:
            counts.append(0)
            avg_scores.append(0)

    kneedle = KneeLocator(thresholds, counts, curve="concave", direction="decreasing", S=1, interp_method="polynomial",
        polynomial_degree=5)
    best_threshold = kneedle.knee or 0.8

    # Use the nearest sampled grid point rather than an exact .index() lookup:
    # the 0.8 fallback is not an exact point of the linspace grid, so
    # thresholds.tolist().index(best_threshold) raised ValueError there.
    best_idx = int(np.argmin(np.abs(thresholds - best_threshold)))

    plt.figure(figsize=(10, 6))
    plt.plot(thresholds, counts, label="Match Count", color='blue')
    plt.plot(thresholds, avg_scores, label="Average Match Score", color='orange')
    plt.axvline(best_threshold, color='red', linestyle='--', label=f"Optimal Threshold: {best_threshold:.2f}")
    plt.scatter([best_threshold], [counts[best_idx]], color='red')
    plt.xlabel("Threshold")
    plt.ylabel("Value")
    plt.title("Threshold vs Match Count")
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig("threshold_tuning.png")
    logger.info(f"最佳阈值为 {best_threshold:.2f}，图表已保存为 threshold_tuning.png")

    return best_threshold


#多阶段匹配器
#多阶段匹配器
class HierarchicalMatcher:
    """Two-stage matcher: exact title equality first, then the dedupe model.

    Left records matched exactly are removed from data_1 before the model
    stage so they are not re-matched.  Matches accumulate as
    (left_id, right_id, score) triples; exact hits carry score 1.0.
    """

    def __init__(self, data_1, data_2, threshold):
        self.data_1 = data_1
        self.data_2 = data_2
        self.threshold = threshold
        self.matches = []

    def _exact_match(self):
        """Pair each right record whose title equals some left title (score 1.0)."""
        lookup = {}
        for left_id, left_rec in self.data_1.items():
            # Later duplicates overwrite earlier ones, as in a dict comprehension.
            lookup[left_rec["title"]] = left_id
        hits = []
        for right_id, right_rec in self.data_2.items():
            key = right_rec["title"]
            if key and key in lookup:
                hits.append((lookup[key], right_id, 1.0))
        return hits

    def _dedupe_match(self, linker):
        """Delegate fuzzy matching to the trained linker at the configured threshold."""
        return linker.join(self.data_1, self.data_2, threshold=self.threshold)

    def run(self, linker):
        logger.info("精确匹配")
        exact_pairs = self._exact_match()
        self.matches.extend(exact_pairs)

        already_matched = set()
        for left_id, _, _ in exact_pairs:
            already_matched.add(left_id)

        # Drop exactly-matched left records before the model stage.
        remaining = {}
        for record_id, record in self.data_1.items():
            if record_id not in already_matched:
                remaining[record_id] = record
        self.data_1 = remaining

        logger.info("dedupe模型匹配")
        for cluster, score in self._dedupe_match(linker):
            lefts = [rid for rid in cluster if rid.startswith("left_")]
            rights = [rid for rid in cluster if rid.startswith("right_")]
            for left_id in lefts:
                for right_id in rights:
                    self.matches.append((left_id, right_id, score))
        return self.matches


#模型训练与加载
def train_or_load_model(data_1, data_2):
    if os.path.exists(settings_file):
        logger.info("加载已训练模型设置")
        with open(settings_file, "rb") as sf:
            return dedupe.StaticRecordLink(sf)

    logger.info("初始化模型并准备训练")
    fields = [
        dedupe.variables.String("title"),
        dedupe.variables.Text("alias", has_missing=True)
    ]
    linker = RecordLink(fields)

    linker.prepare_training(data_1, data_2, sample_size=min(len(data_1), len(data_2), 10000))

    logger.info("开始人工标注")
    dedupe.console_label(linker)

    logger.info("训练模型中")
    linker.train()

    logger.info("保存模型设置与训练数据")
    with open(settings_file, "wb") as sf:
        linker.write_settings(sf)
    with open(training_file, "w") as tf:
        linker.write_training(tf)
    return linker


if __name__ == "__main__":
    data_1 = readData(left_file, is_left=True)
    data_2 = readData(right_file, is_left=False)

    linker = train_or_load_model(data_1, data_2)

    #使用最优阈值（匹配对数拐点）
    best_threshold = threshold_tuning(linker, data_1, data_2)

    matcher = HierarchicalMatcher(data_1, data_2, threshold=best_threshold)
    matches = matcher.run(linker)

    matched_left_ids = {l for l, r, s in matches}

    logger.info("生成未匹配药材报告")
    missing_titles = [
        record["title"]
        for rid, record in data_1.items()
        if rid not in matched_left_ids and record["title"]
    ]

    with open(output_file, "w", newline='', encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["title"])
        for title in sorted(set(missing_titles)):
            writer.writerow([title])

    logger.info(f"共找到 {len(missing_titles)} 个未匹配药材，结果已保存至 {output_file}")
