import csv
import os
import logging
import optparse
import dedupe


def preProcess(column):
    """Normalize a raw CSV cell for matching.

    Trims whitespace and surrounding double/single quotes, then lowercases.
    Returns None when the cell is empty before or after cleaning — the
    missing-value convention the dedupe library expects.
    """
    if not column:
        return None
    cleaned = column.strip().strip('"').strip("'").lower()
    return cleaned or None


def readData(filename, is_left):
    """Load a CSV file into a dict of cleaned records keyed by filename + row index.

    Each record has a normalized "title" and "alias". The left (primary)
    table carries no alias information, so its alias is fixed to None;
    for the right table the alias column is also cleaned, so matching can
    compare prescription ingredient names against both names and aliases
    of the collected medicines.
    """
    records = {}
    with open(filename, encoding="utf-8") as handle:
        for index, row in enumerate(csv.DictReader(handle)):
            record = {"title": preProcess(row.get("title"))}
            # Only the right-hand table contributes a usable alias field.
            record["alias"] = None if is_left else preProcess(row.get("alias"))
            records[f"{filename}{index}"] = record
    return records


if __name__ == "__main__":
    #设置日志等级
    optp = optparse.OptionParser()
    optp.add_option("-v", "--verbose",
                    dest="verbose",
                    action="count",
                    default=0,
                    help="增加日志详细度（-v INFO，-vv DEBUG）")
    (opts, args) = optp.parse_args()

    log_level = logging.WARNING
    if opts.verbose == 1:
        log_level = logging.INFO
    elif opts.verbose >= 2:
        log_level = logging.DEBUG

    logging.basicConfig(level=log_level)
    logger = logging.getLogger(__name__)
    logger.info("日志系统初始化完成")


    output_file = "missing_medicine.csv"
    settings_file = "medicine_matching_settings"
    training_file = "medicine_matching_training.json"

    left_file = "../medicine_linkage_optimizing/prescpt_medicine.csv"  #主表
    right_file = "got_medicine.csv" #待匹配

    data_1 = readData(left_file, is_left=True)
    data_2 = readData(right_file, is_left=False)

    if os.path.exists(settings_file):
        print("加载已保存模型设置 ...")
        with open(settings_file, "rb") as sf:
            linker = dedupe.StaticRecordLink(sf)
    else:
        print("初始化模型并设置比对字段")
        fields = [
            dedupe.variables.String("title", has_missing=False),
            dedupe.variables.Text("alias", has_missing=True),
        ]
        linker = dedupe.RecordLink(fields)

        if os.path.exists(training_file):
            with open(training_file, "r") as tf:
                linker.prepare_training(data_1, data_2, training_file=tf, sample_size = min(len(data_1), len(data_2), 10000))
        else:
            linker.prepare_training(data_1, data_2,
                                    sample_size = min(len(data_1), len(data_2), 10000))

        dedupe.console_label(linker)
        linker.train()

        with open(training_file, "w") as tf:
            linker.write_training(tf)
        with open(settings_file, "wb") as sf:
            linker.write_settings(sf)

    #匹配
    linked_records = linker.join(data_1, data_2, threshold=0.5)

    #收集prescpt_medicine中匹配成功的记录ID
    matched_left_ids = set()
    for cluster, score in linked_records:
        for record_id in cluster:
            if record_id.startswith(left_file):
                matched_left_ids.add(record_id)

    print("正在筛选未匹配药材")
    missing_titles = []
    for record_id, record in data_1.items():
        if record_id not in matched_left_ids:
            title = record.get("title")
            if title:
                missing_titles.append(title)

    with open(output_file, "w", newline='', encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["title"])
        for title in sorted(missing_titles):
            writer.writerow([title])

    print(f"共找到 {len(missing_titles)} 个未匹配的药材，结果已保存至 {output_file}")
